From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 21 Apr 2024 13:54:28 +0200
Subject: Adding upstream version 18.2.2.

Signed-off-by: Daniel Baumann
---
 src/rocksdb/java/CMakeLists.txt | 549 ++
 src/rocksdb/java/HISTORY-JAVA.md | 86 +
 src/rocksdb/java/Makefile | 452 +
 src/rocksdb/java/RELEASE.md | 59 +
 .../java/org/rocksdb/benchmark/DbBenchmark.java | 1640 ++++
 src/rocksdb/java/crossbuild/Vagrantfile | 51 +
 src/rocksdb/java/crossbuild/build-linux-alpine.sh | 70 +
 src/rocksdb/java/crossbuild/build-linux-centos.sh | 38 +
 src/rocksdb/java/crossbuild/build-linux.sh | 15 +
 .../java/crossbuild/docker-build-linux-alpine.sh | 17 +
 .../java/crossbuild/docker-build-linux-centos.sh | 38 +
 src/rocksdb/java/jdb_bench.sh | 13 +
 src/rocksdb/java/jmh/LICENSE-HEADER.txt | 5 +
 src/rocksdb/java/jmh/README.md | 24 +
 src/rocksdb/java/jmh/pom.xml | 138 +
 .../java/org/rocksdb/jmh/ComparatorBenchmarks.java | 139 +
 .../main/java/org/rocksdb/jmh/GetBenchmarks.java | 139 +
 .../java/org/rocksdb/jmh/MultiGetBenchmarks.java | 232 +
 .../main/java/org/rocksdb/jmh/PutBenchmarks.java | 112 +
 .../src/main/java/org/rocksdb/util/FileUtils.java | 59 +
 .../src/main/java/org/rocksdb/util/KVUtils.java | 72 +
 src/rocksdb/java/pom.xml.template | 178 +
 src/rocksdb/java/rocksjni/backup_engine_options.cc | 365 +
 src/rocksdb/java/rocksjni/backupenginejni.cc | 279 +
 src/rocksdb/java/rocksjni/cache.cc | 35 +
 .../java/rocksjni/cassandra_compactionfilterjni.cc | 25 +
 .../java/rocksjni/cassandra_value_operator.cc | 50 +
 src/rocksdb/java/rocksjni/checkpoint.cc | 71 +
 src/rocksdb/java/rocksjni/clock_cache.cc | 42 +
 src/rocksdb/java/rocksjni/columnfamilyhandle.cc | 72 +
 src/rocksdb/java/rocksjni/compact_range_options.cc | 222 +
 src/rocksdb/java/rocksjni/compaction_filter.cc | 29 +
 .../java/rocksjni/compaction_filter_factory.cc | 42 +
 .../compaction_filter_factory_jnicallback.cc | 79 +
 .../compaction_filter_factory_jnicallback.h | 37 +
 src/rocksdb/java/rocksjni/compaction_job_info.cc | 230 +
 src/rocksdb/java/rocksjni/compaction_job_stats.cc | 345 +
 src/rocksdb/java/rocksjni/compaction_options.cc | 112 +
 .../java/rocksjni/compaction_options_fifo.cc | 83 +
 .../java/rocksjni/compaction_options_universal.cc | 209 +
 src/rocksdb/java/rocksjni/comparator.cc | 60 +
 src/rocksdb/java/rocksjni/comparatorjnicallback.cc | 646 ++
 src/rocksdb/java/rocksjni/comparatorjnicallback.h | 141 +
 src/rocksdb/java/rocksjni/compression_options.cc | 214 +
 .../java/rocksjni/concurrent_task_limiter.cc | 97 +
 src/rocksdb/java/rocksjni/config_options.cc | 90 +
 .../java/rocksjni/cplusplus_to_java_convert.h | 37 +
 src/rocksdb/java/rocksjni/env.cc | 205 +
 src/rocksdb/java/rocksjni/env_options.cc | 305 +
 src/rocksdb/java/rocksjni/event_listener.cc | 44 +
 .../java/rocksjni/event_listener_jnicallback.cc | 502 ++
 .../java/rocksjni/event_listener_jnicallback.h | 122 +
 src/rocksdb/java/rocksjni/filter.cc | 46 +
 .../java/rocksjni/ingest_external_file_options.cc | 199 +
 src/rocksdb/java/rocksjni/iterator.cc | 340 +
 src/rocksdb/java/rocksjni/jnicallback.cc | 54 +
 src/rocksdb/java/rocksjni/jnicallback.h | 32 +
 src/rocksdb/java/rocksjni/loggerjnicallback.cc | 299 +
 src/rocksdb/java/rocksjni/loggerjnicallback.h | 51 +
 src/rocksdb/java/rocksjni/lru_cache.cc | 49 +
 src/rocksdb/java/rocksjni/memory_util.cc | 100 +
 src/rocksdb/java/rocksjni/memtablejni.cc | 94 +
 src/rocksdb/java/rocksjni/merge_operator.cc | 98 +
 .../rocksjni/native_comparator_wrapper_test.cc | 45 +
 .../java/rocksjni/optimistic_transaction_db.cc | 270 +
 .../rocksjni/optimistic_transaction_options.cc | 78 +
 src/rocksdb/java/rocksjni/options.cc | 8687 +++++++++++++++++++
 src/rocksdb/java/rocksjni/options_util.cc | 195 +
 src/rocksdb/java/rocksjni/persistent_cache.cc | 60 +
 src/rocksdb/java/rocksjni/portal.h | 8745 ++++++++++++++++++++
 src/rocksdb/java/rocksjni/ratelimiterjni.cc | 128 +
 .../remove_emptyvalue_compactionfilterjni.cc | 24 +
 src/rocksdb/java/rocksjni/restorejni.cc | 42 +
 src/rocksdb/java/rocksjni/rocks_callback_object.cc | 30 +
 .../java/rocksjni/rocksdb_exception_test.cc | 81 +
 src/rocksdb/java/rocksjni/rocksjni.cc | 3947 +++++++++
 src/rocksdb/java/rocksjni/slice.cc | 374 +
 src/rocksdb/java/rocksjni/snapshot.cc | 27 +
 src/rocksdb/java/rocksjni/sst_file_manager.cc | 250 +
 .../java/rocksjni/sst_file_reader_iterator.cc | 373 +
 src/rocksdb/java/rocksjni/sst_file_readerjni.cc | 118 +
 src/rocksdb/java/rocksjni/sst_file_writerjni.cc | 310 +
 src/rocksdb/java/rocksjni/sst_partitioner.cc | 43 +
 src/rocksdb/java/rocksjni/statistics.cc | 268 +
 src/rocksdb/java/rocksjni/statisticsjni.cc | 31 +
 src/rocksdb/java/rocksjni/statisticsjni.h | 34 +
 src/rocksdb/java/rocksjni/table.cc | 161 +
 src/rocksdb/java/rocksjni/table_filter.cc | 27 +
 .../java/rocksjni/table_filter_jnicallback.cc | 66 +
 .../java/rocksjni/table_filter_jnicallback.h | 36 +
 .../java/rocksjni/testable_event_listener.cc | 219 +
 src/rocksdb/java/rocksjni/thread_status.cc | 125 +
 src/rocksdb/java/rocksjni/trace_writer.cc | 24 +
 .../java/rocksjni/trace_writer_jnicallback.cc | 118 +
 .../java/rocksjni/trace_writer_jnicallback.h | 36 +
 src/rocksdb/java/rocksjni/transaction.cc | 1655 ++++
 src/rocksdb/java/rocksjni/transaction_db.cc | 451 +
 .../java/rocksjni/transaction_db_options.cc | 169 +
 src/rocksdb/java/rocksjni/transaction_log.cc | 80 +
 src/rocksdb/java/rocksjni/transaction_notifier.cc | 44 +
 .../rocksjni/transaction_notifier_jnicallback.cc | 42 +
 .../rocksjni/transaction_notifier_jnicallback.h | 42 +
 src/rocksdb/java/rocksjni/transaction_options.cc | 191 +
 src/rocksdb/java/rocksjni/ttl.cc | 212 +
 src/rocksdb/java/rocksjni/wal_filter.cc | 24 +
 .../java/rocksjni/wal_filter_jnicallback.cc | 139 +
 src/rocksdb/java/rocksjni/wal_filter_jnicallback.h | 42 +
 src/rocksdb/java/rocksjni/write_batch.cc | 676 ++
 src/rocksdb/java/rocksjni/write_batch_test.cc | 199 +
 .../java/rocksjni/write_batch_with_index.cc | 953 +++
 src/rocksdb/java/rocksjni/write_buffer_manager.cc | 45 +
 .../java/rocksjni/writebatchhandlerjnicallback.cc | 519 ++
 .../java/rocksjni/writebatchhandlerjnicallback.h | 92 +
 .../src/main/java/OptimisticTransactionSample.java | 184 +
 .../src/main/java/RocksDBColumnFamilySample.java | 78 +
 .../java/samples/src/main/java/RocksDBSample.java | 296 +
 .../samples/src/main/java/TransactionSample.java | 183 +
 .../java/org/rocksdb/AbstractCompactionFilter.java | 59 +
 .../rocksdb/AbstractCompactionFilterFactory.java | 77 +
 .../main/java/org/rocksdb/AbstractComparator.java | 124 +
 .../org/rocksdb/AbstractComparatorJniBridge.java | 125 +
 .../java/org/rocksdb/AbstractEventListener.java | 334 +
 .../rocksdb/AbstractImmutableNativeReference.java | 65 +
 .../java/org/rocksdb/AbstractMutableOptions.java | 370 +
 .../java/org/rocksdb/AbstractNativeReference.java | 48 +
 .../java/org/rocksdb/AbstractRocksIterator.java | 146 +
 .../src/main/java/org/rocksdb/AbstractSlice.java | 191 +
 .../main/java/org/rocksdb/AbstractTableFilter.java | 20 +
 .../main/java/org/rocksdb/AbstractTraceWriter.java | 70 +
 .../org/rocksdb/AbstractTransactionNotifier.java | 54 +
 .../main/java/org/rocksdb/AbstractWalFilter.java | 49 +
 .../main/java/org/rocksdb/AbstractWriteBatch.java | 204 +
 .../java/src/main/java/org/rocksdb/AccessHint.java | 53 +
 .../AdvancedColumnFamilyOptionsInterface.java | 464 ++
 ...dvancedMutableColumnFamilyOptionsInterface.java | 830 ++
 .../java/org/rocksdb/BackgroundErrorReason.java | 46 +
 .../src/main/java/org/rocksdb/BackupEngine.java | 259 +
 .../main/java/org/rocksdb/BackupEngineOptions.java | 458 +
 .../java/src/main/java/org/rocksdb/BackupInfo.java | 76 +
 .../java/org/rocksdb/BlockBasedTableConfig.java | 1055 +++
 .../src/main/java/org/rocksdb/BloomFilter.java | 73 +
 .../main/java/org/rocksdb/BuiltinComparator.java | 20 +
 .../main/java/org/rocksdb/ByteBufferGetStatus.java | 50 +
 .../java/src/main/java/org/rocksdb/Cache.java | 40 +
 .../org/rocksdb/CassandraCompactionFilter.java | 19 +
 .../org/rocksdb/CassandraValueMergeOperator.java | 25 +
 .../java/src/main/java/org/rocksdb/Checkpoint.java | 66 +
 .../src/main/java/org/rocksdb/ChecksumType.java | 45 +
 .../java/src/main/java/org/rocksdb/ClockCache.java | 59 +
 .../java/org/rocksdb/ColumnFamilyDescriptor.java | 84 +
 .../main/java/org/rocksdb/ColumnFamilyHandle.java | 151 +
 .../java/org/rocksdb/ColumnFamilyMetaData.java | 70 +
 .../main/java/org/rocksdb/ColumnFamilyOptions.java | 1540 ++++
 .../org/rocksdb/ColumnFamilyOptionsInterface.java | 536 ++
 .../main/java/org/rocksdb/CompactRangeOptions.java | 238 +
 .../main/java/org/rocksdb/CompactionJobInfo.java | 161 +
 .../main/java/org/rocksdb/CompactionJobStats.java | 295 +
 .../main/java/org/rocksdb/CompactionOptions.java | 121 +
 .../java/org/rocksdb/CompactionOptionsFIFO.java | 89 +
 .../org/rocksdb/CompactionOptionsUniversal.java | 273 +
 .../main/java/org/rocksdb/CompactionPriority.java | 81 +
 .../main/java/org/rocksdb/CompactionReason.java | 125 +
 .../main/java/org/rocksdb/CompactionStopStyle.java | 55 +
 .../src/main/java/org/rocksdb/CompactionStyle.java | 80 +
 .../main/java/org/rocksdb/ComparatorOptions.java | 133 +
 .../src/main/java/org/rocksdb/ComparatorType.java | 48 +
 .../main/java/org/rocksdb/CompressionOptions.java | 151 +
 .../src/main/java/org/rocksdb/CompressionType.java | 121 +
 .../java/org/rocksdb/ConcurrentTaskLimiter.java | 44 +
 .../org/rocksdb/ConcurrentTaskLimiterImpl.java | 48 +
 .../src/main/java/org/rocksdb/ConfigOptions.java | 53 +
 .../java/src/main/java/org/rocksdb/DBOptions.java | 1495 ++++
 .../main/java/org/rocksdb/DBOptionsInterface.java | 1756 ++++
 .../main/java/org/rocksdb/DataBlockIndexType.java | 32 +
 .../java/src/main/java/org/rocksdb/DbPath.java | 47 +
 .../src/main/java/org/rocksdb/DirectSlice.java | 137 +
 .../src/main/java/org/rocksdb/EncodingType.java | 55 +
 .../java/src/main/java/org/rocksdb/Env.java | 167 +
 .../java/src/main/java/org/rocksdb/EnvOptions.java | 366 +
 .../src/main/java/org/rocksdb/EventListener.java | 335 +
 .../src/main/java/org/rocksdb/Experimental.java | 23 +
 .../org/rocksdb/ExternalFileIngestionInfo.java | 103 +
 .../main/java/org/rocksdb/FileOperationInfo.java | 112 +
 .../java/src/main/java/org/rocksdb/Filter.java | 36 +
 .../src/main/java/org/rocksdb/FlushJobInfo.java | 186 +
 .../src/main/java/org/rocksdb/FlushOptions.java | 90 +
 .../src/main/java/org/rocksdb/FlushReason.java | 53 +
 .../org/rocksdb/HashLinkedListMemTableConfig.java | 174 +
 .../org/rocksdb/HashSkipListMemTableConfig.java | 106 +
 .../src/main/java/org/rocksdb/HistogramData.java | 75 +
 .../src/main/java/org/rocksdb/HistogramType.java | 221 +
 .../java/src/main/java/org/rocksdb/Holder.java | 46 +
 .../main/java/org/rocksdb/IndexShorteningMode.java | 60 +
 .../java/src/main/java/org/rocksdb/IndexType.java | 55 +
 .../src/main/java/org/rocksdb/InfoLogLevel.java | 49 +
 .../org/rocksdb/IngestExternalFileOptions.java | 227 +
 .../src/main/java/org/rocksdb/KeyMayExist.java | 36 +
 .../java/src/main/java/org/rocksdb/LRUCache.java | 106 +
 .../src/main/java/org/rocksdb/LevelMetaData.java | 56 +
 .../main/java/org/rocksdb/LiveFileMetaData.java | 55 +
 .../java/src/main/java/org/rocksdb/LogFile.java | 75 +
 .../java/src/main/java/org/rocksdb/Logger.java | 122 +
 .../src/main/java/org/rocksdb/MemTableConfig.java | 29 +
 .../src/main/java/org/rocksdb/MemTableInfo.java | 103 +
 .../src/main/java/org/rocksdb/MemoryUsageType.java | 72 +
 .../java/src/main/java/org/rocksdb/MemoryUtil.java | 60 +
 .../src/main/java/org/rocksdb/MergeOperator.java | 18 +
 .../org/rocksdb/MutableColumnFamilyOptions.java | 623 ++
 .../MutableColumnFamilyOptionsInterface.java | 156 +
 .../main/java/org/rocksdb/MutableDBOptions.java | 294 +
 .../org/rocksdb/MutableDBOptionsInterface.java | 440 +
 .../main/java/org/rocksdb/MutableOptionKey.java | 16 +
 .../main/java/org/rocksdb/MutableOptionValue.java | 369 +
 .../java/org/rocksdb/NativeComparatorWrapper.java | 59 +
 .../main/java/org/rocksdb/NativeLibraryLoader.java | 172 +
 .../src/main/java/org/rocksdb/OperationStage.java | 59 +
 .../src/main/java/org/rocksdb/OperationType.java | 54 +
 .../java/org/rocksdb/OptimisticTransactionDB.java | 226 +
 .../org/rocksdb/OptimisticTransactionOptions.java | 53 +
 .../src/main/java/org/rocksdb/OptionString.java | 256 +
 .../java/src/main/java/org/rocksdb/Options.java | 2578 ++++++
 .../src/main/java/org/rocksdb/OptionsUtil.java | 184 +
 .../src/main/java/org/rocksdb/PersistentCache.java | 26 +
 .../main/java/org/rocksdb/PlainTableConfig.java | 251 +
 .../java/org/rocksdb/PrepopulateBlobCache.java | 117 +
 .../java/src/main/java/org/rocksdb/Priority.java | 49 +
 .../java/src/main/java/org/rocksdb/Range.java | 19 +
 .../src/main/java/org/rocksdb/RateLimiter.java | 227 +
 .../src/main/java/org/rocksdb/RateLimiterMode.java | 52 +
 .../src/main/java/org/rocksdb/ReadOptions.java | 831 ++
 .../java/src/main/java/org/rocksdb/ReadTier.java | 49 +
 .../rocksdb/RemoveEmptyValueCompactionFilter.java | 18 +
 .../src/main/java/org/rocksdb/RestoreOptions.java | 32 +
 .../org/rocksdb/ReusedSynchronisationType.java | 65 +
 .../main/java/org/rocksdb/RocksCallbackObject.java | 73 +
 .../java/src/main/java/org/rocksdb/RocksDB.java | 4694 +++++++++++
 .../main/java/org/rocksdb/RocksDBException.java | 44 +
 .../java/src/main/java/org/rocksdb/RocksEnv.java | 32 +
 .../src/main/java/org/rocksdb/RocksIterator.java | 140 +
 .../java/org/rocksdb/RocksIteratorInterface.java | 127 +
 .../src/main/java/org/rocksdb/RocksMemEnv.java | 31 +
 .../main/java/org/rocksdb/RocksMutableObject.java | 87 +
 .../src/main/java/org/rocksdb/RocksObject.java | 45 +
 .../src/main/java/org/rocksdb/SanityLevel.java | 47 +
 .../java/org/rocksdb/SizeApproximationFlag.java | 31 +
 .../java/org/rocksdb/SkipListMemTableConfig.java | 51 +
 .../java/src/main/java/org/rocksdb/Slice.java | 136 +
 .../java/src/main/java/org/rocksdb/Snapshot.java | 41 +
 .../src/main/java/org/rocksdb/SstFileManager.java | 251 +
 .../src/main/java/org/rocksdb/SstFileMetaData.java | 162 +
 .../src/main/java/org/rocksdb/SstFileReader.java | 82 +
 .../java/org/rocksdb/SstFileReaderIterator.java | 140 +
 .../src/main/java/org/rocksdb/SstFileWriter.java | 238 +
 .../java/org/rocksdb/SstPartitionerFactory.java | 15 +
 .../rocksdb/SstPartitionerFixedPrefixFactory.java | 19 +
 .../java/src/main/java/org/rocksdb/StateType.java | 53 +
 .../java/src/main/java/org/rocksdb/Statistics.java | 152 +
 .../main/java/org/rocksdb/StatisticsCollector.java | 111 +
 .../org/rocksdb/StatisticsCollectorCallback.java | 32 +
 .../main/java/org/rocksdb/StatsCollectorInput.java | 35 +
 .../java/src/main/java/org/rocksdb/StatsLevel.java | 65 +
 .../java/src/main/java/org/rocksdb/Status.java | 155 +
 .../java/org/rocksdb/StringAppendOperator.java | 29 +
 .../org/rocksdb/TableFileCreationBriefInfo.java | 107 +
 .../java/org/rocksdb/TableFileCreationInfo.java | 86 +
 .../java/org/rocksdb/TableFileCreationReason.java | 46 +
 .../java/org/rocksdb/TableFileDeletionInfo.java | 86 +
 .../src/main/java/org/rocksdb/TableFilter.java | 21 +
 .../main/java/org/rocksdb/TableFormatConfig.java | 22 +
 .../src/main/java/org/rocksdb/TableProperties.java | 426 +
 .../src/main/java/org/rocksdb/ThreadStatus.java | 224 +
 .../java/src/main/java/org/rocksdb/ThreadType.java | 65 +
 .../java/src/main/java/org/rocksdb/TickerType.java | 874 ++
 .../java/src/main/java/org/rocksdb/TimedEnv.java | 30 +
 .../src/main/java/org/rocksdb/TraceOptions.java | 32 +
 .../src/main/java/org/rocksdb/TraceWriter.java | 36 +
 .../src/main/java/org/rocksdb/Transaction.java | 2170 +++++
 .../src/main/java/org/rocksdb/TransactionDB.java | 403 +
 .../java/org/rocksdb/TransactionDBOptions.java | 217 +
 .../java/org/rocksdb/TransactionLogIterator.java | 112 +
 .../main/java/org/rocksdb/TransactionOptions.java | 189 +
 .../src/main/java/org/rocksdb/TransactionalDB.java | 65 +
 .../java/org/rocksdb/TransactionalOptions.java | 31 +
 .../java/src/main/java/org/rocksdb/TtlDB.java | 245 +
 .../main/java/org/rocksdb/TxnDBWritePolicy.java | 62 +
 .../main/java/org/rocksdb/UInt64AddOperator.java | 19 +
 .../java/org/rocksdb/VectorMemTableConfig.java | 46 +
 .../src/main/java/org/rocksdb/WALRecoveryMode.java | 83 +
 .../main/java/org/rocksdb/WBWIRocksIterator.java | 203 +
 .../src/main/java/org/rocksdb/WalFileType.java | 55 +
 .../java/src/main/java/org/rocksdb/WalFilter.java | 87 +
 .../main/java/org/rocksdb/WalProcessingOption.java | 54 +
 .../java/src/main/java/org/rocksdb/WriteBatch.java | 396 +
 .../main/java/org/rocksdb/WriteBatchInterface.java | 283 +
 .../main/java/org/rocksdb/WriteBatchWithIndex.java | 361 +
 .../main/java/org/rocksdb/WriteBufferManager.java | 50 +
 .../src/main/java/org/rocksdb/WriteOptions.java | 256 +
 .../main/java/org/rocksdb/WriteStallCondition.java | 44 +
 .../src/main/java/org/rocksdb/WriteStallInfo.java | 75 +
 .../src/main/java/org/rocksdb/util/ByteUtil.java | 52 +
 .../java/org/rocksdb/util/BytewiseComparator.java | 121 +
 .../main/java/org/rocksdb/util/Environment.java | 245 +
 .../main/java/org/rocksdb/util/IntComparator.java | 67 +
 .../rocksdb/util/ReverseBytewiseComparator.java | 88 +
 .../src/main/java/org/rocksdb/util/SizeUnit.java | 16 +
 .../java/org/rocksdb/AbstractTransactionTest.java | 965 +++
 .../java/org/rocksdb/BackupEngineOptionsTest.java | 300 +
 .../test/java/org/rocksdb/BackupEngineTest.java | 261 +
 .../src/test/java/org/rocksdb/BlobOptionsTest.java | 351 +
 .../org/rocksdb/BlockBasedTableConfigTest.java | 490 ++
 .../java/org/rocksdb/BuiltinComparatorTest.java | 145 +
 .../rocksdb/BytewiseComparatorRegressionTest.java | 126 +
 .../src/test/java/org/rocksdb/CheckPointTest.java | 83 +
 .../src/test/java/org/rocksdb/ClockCacheTest.java | 26 +
 .../java/org/rocksdb/ColumnFamilyOptionsTest.java | 714 ++
 .../test/java/org/rocksdb/ColumnFamilyTest.java | 582 ++
 .../java/org/rocksdb/CompactRangeOptionsTest.java | 98 +
 .../org/rocksdb/CompactionFilterFactoryTest.java | 61 +
 .../java/org/rocksdb/CompactionJobInfoTest.java | 114 +
 .../java/org/rocksdb/CompactionJobStatsTest.java | 196 +
 .../org/rocksdb/CompactionOptionsFIFOTest.java | 35 +
 .../java/org/rocksdb/CompactionOptionsTest.java | 52 +
 .../rocksdb/CompactionOptionsUniversalTest.java | 80 +
 .../java/org/rocksdb/CompactionPriorityTest.java | 31 +
 .../java/org/rocksdb/CompactionStopStyleTest.java | 31 +
 .../java/org/rocksdb/ComparatorOptionsTest.java | 58 +
 .../java/org/rocksdb/CompressionOptionsTest.java | 71 +
 .../java/org/rocksdb/CompressionTypesTest.java | 20 +
 .../org/rocksdb/ConcurrentTaskLimiterTest.java | 56 +
 .../src/test/java/org/rocksdb/DBOptionsTest.java | 904 ++
 .../src/test/java/org/rocksdb/DefaultEnvTest.java | 113 +
 .../src/test/java/org/rocksdb/DirectSliceTest.java | 93 +
 .../src/test/java/org/rocksdb/EnvOptionsTest.java | 145 +
 .../test/java/org/rocksdb/EventListenerTest.java | 725 ++
 .../java/src/test/java/org/rocksdb/FilterTest.java | 39 +
 .../test/java/org/rocksdb/FlushOptionsTest.java | 31 +
 .../java/src/test/java/org/rocksdb/FlushTest.java | 49 +
 .../test/java/org/rocksdb/InfoLogLevelTest.java | 109 +
 .../org/rocksdb/IngestExternalFileOptionsTest.java | 107 +
 .../src/test/java/org/rocksdb/KeyMayExistTest.java | 528 ++
 .../src/test/java/org/rocksdb/LRUCacheTest.java | 32 +
 .../java/src/test/java/org/rocksdb/LoggerTest.java | 239 +
 .../src/test/java/org/rocksdb/MemTableTest.java | 111 +
 .../src/test/java/org/rocksdb/MemoryUtilTest.java | 144 +
 .../java/src/test/java/org/rocksdb/MergeTest.java | 465 ++
 .../test/java/org/rocksdb/MixedOptionsTest.java | 55 +
 .../org/rocksdb/MultiColumnRegressionTest.java | 146 +
 .../java/org/rocksdb/MultiGetManyKeysTest.java | 241 +
 .../src/test/java/org/rocksdb/MultiGetTest.java | 525 ++
 .../rocksdb/MutableColumnFamilyOptionsTest.java | 167 +
 .../java/org/rocksdb/MutableDBOptionsTest.java | 85 +
 .../java/org/rocksdb/MutableOptionsGetSetTest.java | 429 +
 .../org/rocksdb/NativeComparatorWrapperTest.java | 95 +
 .../java/org/rocksdb/NativeLibraryLoaderTest.java | 41 +
 .../org/rocksdb/OptimisticTransactionDBTest.java | 131 +
 .../rocksdb/OptimisticTransactionOptionsTest.java | 38 +
 .../org/rocksdb/OptimisticTransactionTest.java | 446 +
 .../src/test/java/org/rocksdb/OptionsTest.java | 1492 ++++
 .../src/test/java/org/rocksdb/OptionsUtilTest.java | 126 +
 .../java/org/rocksdb/PlainTableConfigTest.java | 89 +
 .../java/org/rocksdb/PlatformRandomHelper.java | 58 +
 .../java/org/rocksdb/PutMultiplePartsTest.java | 164 +
 .../src/test/java/org/rocksdb/RateLimiterTest.java | 65 +
 .../src/test/java/org/rocksdb/ReadOnlyTest.java | 234 +
 .../src/test/java/org/rocksdb/ReadOptionsTest.java | 375 +
 .../java/org/rocksdb/RocksDBExceptionTest.java | 115 +
 .../src/test/java/org/rocksdb/RocksDBTest.java | 1695 ++++
 .../test/java/org/rocksdb/RocksIteratorTest.java | 289 +
 .../src/test/java/org/rocksdb/RocksMemEnvTest.java | 141 +
 .../org/rocksdb/RocksNativeLibraryResource.java | 18 +
 .../src/test/java/org/rocksdb/SecondaryDBTest.java | 135 +
 .../java/src/test/java/org/rocksdb/SliceTest.java | 80 +
 .../src/test/java/org/rocksdb/SnapshotTest.java | 169 +
 .../test/java/org/rocksdb/SstFileManagerTest.java | 66 +
 .../test/java/org/rocksdb/SstFileReaderTest.java | 222 +
 .../test/java/org/rocksdb/SstFileWriterTest.java | 241 +
 .../test/java/org/rocksdb/SstPartitionerTest.java | 72 +
 .../java/org/rocksdb/StatisticsCollectorTest.java | 55 +
 .../src/test/java/org/rocksdb/StatisticsTest.java | 168 +
 .../test/java/org/rocksdb/StatsCallbackMock.java | 20 +
 .../src/test/java/org/rocksdb/TableFilterTest.java | 106 +
 .../src/test/java/org/rocksdb/TimedEnvTest.java | 43 +
 .../java/org/rocksdb/TransactionDBOptionsTest.java | 64 +
 .../test/java/org/rocksdb/TransactionDBTest.java | 178 +
 .../org/rocksdb/TransactionLogIteratorTest.java | 139 +
 .../java/org/rocksdb/TransactionOptionsTest.java | 72 +
 .../src/test/java/org/rocksdb/TransactionTest.java | 488 ++
 .../java/src/test/java/org/rocksdb/TtlDBTest.java | 112 +
 .../java/src/test/java/org/rocksdb/Types.java | 43 +
 .../test/java/org/rocksdb/VerifyChecksumsTest.java | 213 +
 .../test/java/org/rocksdb/WALRecoveryModeTest.java | 22 +
 .../src/test/java/org/rocksdb/WalFilterTest.java | 165 +
 .../java/org/rocksdb/WriteBatchHandlerTest.java | 76 +
 .../src/test/java/org/rocksdb/WriteBatchTest.java | 528 ++
 .../java/org/rocksdb/WriteBatchThreadedTest.java | 104 +
 .../java/org/rocksdb/WriteBatchWithIndexTest.java | 1068 +++
 .../test/java/org/rocksdb/WriteOptionsTest.java | 75 +
 .../RemoveEmptyValueCompactionFilterFactory.java | 21 +
 .../java/org/rocksdb/test/RocksJunitRunner.java | 174 +
 .../org/rocksdb/test/TestableEventListener.java | 23 +
 .../java/org/rocksdb/util/ByteBufferAllocator.java | 16 +
 .../rocksdb/util/BytewiseComparatorIntTest.java | 267 +
 .../org/rocksdb/util/BytewiseComparatorTest.java | 531 ++
 .../rocksdb/util/CapturingWriteBatchHandler.java | 190 +
 .../rocksdb/util/DirectByteBufferAllocator.java | 18 +
 .../java/org/rocksdb/util/EnvironmentTest.java | 304 +
 .../org/rocksdb/util/HeapByteBufferAllocator.java | 18 +
 .../java/org/rocksdb/util/IntComparatorTest.java | 266 +
 .../java/org/rocksdb/util/JNIComparatorTest.java | 180 +
 .../util/ReverseBytewiseComparatorIntTest.java | 270 +
 .../test/java/org/rocksdb/util/SizeUnitTest.java | 27 +
 .../src/test/java/org/rocksdb/util/TestUtil.java | 72 +
 .../java/org/rocksdb/util/WriteBatchGetter.java | 139 +
 src/rocksdb/java/understanding_options.md | 79 +
 414 files changed, 104277 insertions(+)
 create mode 100644 src/rocksdb/java/CMakeLists.txt
 create mode 100644 src/rocksdb/java/HISTORY-JAVA.md
 create mode 100644 src/rocksdb/java/Makefile
 create mode 100644 src/rocksdb/java/RELEASE.md
 create mode 100644 src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
 create mode 100644 src/rocksdb/java/crossbuild/Vagrantfile
 create mode 100755 src/rocksdb/java/crossbuild/build-linux-alpine.sh
 create mode 100755 src/rocksdb/java/crossbuild/build-linux-centos.sh
 create mode 100755 src/rocksdb/java/crossbuild/build-linux.sh
 create mode 100755 src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh
 create mode 100755 src/rocksdb/java/crossbuild/docker-build-linux-centos.sh
 create mode 100755 src/rocksdb/java/jdb_bench.sh
 create mode 100644 src/rocksdb/java/jmh/LICENSE-HEADER.txt
 create mode 100644 src/rocksdb/java/jmh/README.md
 create mode 100644 src/rocksdb/java/jmh/pom.xml
 create mode 100644 src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java
 create mode 100644 src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java
 create mode 100644 src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java
 create mode 100644 src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java
 create mode 100644 src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java
 create mode 100644 src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java
 create mode 100644 src/rocksdb/java/pom.xml.template
 create mode 100644 src/rocksdb/java/rocksjni/backup_engine_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/backupenginejni.cc
 create mode 100644 src/rocksdb/java/rocksjni/cache.cc
 create mode 100644 src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc
 create mode 100644 src/rocksdb/java/rocksjni/cassandra_value_operator.cc
 create mode 100644 src/rocksdb/java/rocksjni/checkpoint.cc
 create mode 100644 src/rocksdb/java/rocksjni/clock_cache.cc
 create mode 100644 src/rocksdb/java/rocksjni/columnfamilyhandle.cc
 create mode 100644 src/rocksdb/java/rocksjni/compact_range_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_filter.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_filter_factory.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/compaction_job_info.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_job_stats.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_options_fifo.cc
 create mode 100644 src/rocksdb/java/rocksjni/compaction_options_universal.cc
 create mode 100644 src/rocksdb/java/rocksjni/comparator.cc
 create mode 100644 src/rocksdb/java/rocksjni/comparatorjnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/comparatorjnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/compression_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/concurrent_task_limiter.cc
 create mode 100644 src/rocksdb/java/rocksjni/config_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/cplusplus_to_java_convert.h
 create mode 100644 src/rocksdb/java/rocksjni/env.cc
 create mode 100644 src/rocksdb/java/rocksjni/env_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/event_listener.cc
 create mode 100644 src/rocksdb/java/rocksjni/event_listener_jnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/event_listener_jnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/filter.cc
 create mode 100644 src/rocksdb/java/rocksjni/ingest_external_file_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/iterator.cc
 create mode 100644 src/rocksdb/java/rocksjni/jnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/jnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/loggerjnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/loggerjnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/lru_cache.cc
 create mode 100644 src/rocksdb/java/rocksjni/memory_util.cc
 create mode 100644 src/rocksdb/java/rocksjni/memtablejni.cc
 create mode 100644 src/rocksdb/java/rocksjni/merge_operator.cc
 create mode 100644 src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc
 create mode 100644 src/rocksdb/java/rocksjni/optimistic_transaction_db.cc
 create mode 100644 src/rocksdb/java/rocksjni/optimistic_transaction_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/options.cc
 create mode 100644 src/rocksdb/java/rocksjni/options_util.cc
 create mode 100644 src/rocksdb/java/rocksjni/persistent_cache.cc
 create mode 100644 src/rocksdb/java/rocksjni/portal.h
 create mode 100644 src/rocksdb/java/rocksjni/ratelimiterjni.cc
 create mode 100644 src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
 create mode 100644 src/rocksdb/java/rocksjni/restorejni.cc
 create mode 100644 src/rocksdb/java/rocksjni/rocks_callback_object.cc
 create mode 100644 src/rocksdb/java/rocksjni/rocksdb_exception_test.cc
 create mode 100644 src/rocksdb/java/rocksjni/rocksjni.cc
 create mode 100644 src/rocksdb/java/rocksjni/slice.cc
 create mode 100644 src/rocksdb/java/rocksjni/snapshot.cc
 create mode 100644 src/rocksdb/java/rocksjni/sst_file_manager.cc
 create mode 100644 src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc
 create mode 100644 src/rocksdb/java/rocksjni/sst_file_readerjni.cc
 create mode 100644 src/rocksdb/java/rocksjni/sst_file_writerjni.cc
 create mode 100644 src/rocksdb/java/rocksjni/sst_partitioner.cc
 create mode 100644 src/rocksdb/java/rocksjni/statistics.cc
 create mode 100644 src/rocksdb/java/rocksjni/statisticsjni.cc
 create mode 100644 src/rocksdb/java/rocksjni/statisticsjni.h
 create mode 100644 src/rocksdb/java/rocksjni/table.cc
 create mode 100644 src/rocksdb/java/rocksjni/table_filter.cc
 create mode 100644 src/rocksdb/java/rocksjni/table_filter_jnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/table_filter_jnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/testable_event_listener.cc
 create mode 100644 src/rocksdb/java/rocksjni/thread_status.cc
 create mode 100644 src/rocksdb/java/rocksjni/trace_writer.cc
 create mode 100644 src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/trace_writer_jnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/transaction.cc
 create mode 100644 src/rocksdb/java/rocksjni/transaction_db.cc
 create mode 100644 src/rocksdb/java/rocksjni/transaction_db_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/transaction_log.cc
 create mode 100644 src/rocksdb/java/rocksjni/transaction_notifier.cc
 create mode 100644 src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/transaction_options.cc
 create mode 100644 src/rocksdb/java/rocksjni/ttl.cc
 create mode 100644 src/rocksdb/java/rocksjni/wal_filter.cc
 create mode 100644 src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/wal_filter_jnicallback.h
 create mode 100644 src/rocksdb/java/rocksjni/write_batch.cc
 create mode 100644 src/rocksdb/java/rocksjni/write_batch_test.cc
 create mode 100644 src/rocksdb/java/rocksjni/write_batch_with_index.cc
 create mode 100644 src/rocksdb/java/rocksjni/write_buffer_manager.cc
 create mode 100644 src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc
 create mode 100644 src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h
 create mode 100644 src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java
 create mode 100644 src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java
 create mode 100644 src/rocksdb/java/samples/src/main/java/RocksDBSample.java
 create mode 100644 src/rocksdb/java/samples/src/main/java/TransactionSample.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractEventListener.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/BackgroundErrorReason.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/BackupEngineOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Cache.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ConfigOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Env.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/EventListener.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/FileOperationInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Filter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/FlushJobInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/FlushReason.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Holder.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/IndexShorteningMode.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/KeyMayExist.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Logger.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MemTableInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/NativeComparatorWrapper.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/OptionString.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Options.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Priority.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Range.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java
 create mode 100755 src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SanityLevel.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Slice.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFactory.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/StateType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Status.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationReason.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WriteStallCondition.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/WriteStallInfo.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
 create mode 100644 src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/BlobOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/EventListenerTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MultiGetTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/SecondaryDBTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/SstPartitionerTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/Types.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/VerifyChecksumsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/test/TestableEventListener.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java
 create mode 100644 src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java
 create mode 100644 src/rocksdb/java/understanding_options.md

diff --git a/src/rocksdb/java/CMakeLists.txt b/src/rocksdb/java/CMakeLists.txt
new file mode 100644
index 000000000..5d62630fd
--- /dev/null
+++ b/src/rocksdb/java/CMakeLists.txt
@@ -0,0 +1,549 @@
+cmake_minimum_required(VERSION 3.4)
+
+if(${CMAKE_VERSION} VERSION_LESS "3.11.4")
+  message("Please consider switching to CMake 3.11.4 or newer")
+endif()
+
+set(CMAKE_JAVA_COMPILE_FLAGS -source 7)
+
+set(JNI_NATIVE_SOURCES
+  rocksjni/backup_engine_options.cc
+  rocksjni/backupenginejni.cc
+  rocksjni/cassandra_compactionfilterjni.cc
+  rocksjni/cassandra_value_operator.cc
+  rocksjni/checkpoint.cc
+  rocksjni/clock_cache.cc
+  rocksjni/cache.cc
+  rocksjni/columnfamilyhandle.cc
+  rocksjni/compaction_filter.cc
+  rocksjni/compaction_filter_factory.cc
+  rocksjni/compaction_filter_factory_jnicallback.cc
+  rocksjni/compaction_job_info.cc
+  rocksjni/compaction_job_stats.cc
+  rocksjni/compaction_options.cc
+  rocksjni/compaction_options_fifo.cc
+  rocksjni/compaction_options_universal.cc
+  rocksjni/compact_range_options.cc
+  rocksjni/comparator.cc
+  rocksjni/comparatorjnicallback.cc
+  rocksjni/compression_options.cc
+  rocksjni/concurrent_task_limiter.cc
+  rocksjni/config_options.cc
+  rocksjni/env.cc
+  rocksjni/env_options.cc
+  rocksjni/event_listener.cc
+  rocksjni/event_listener_jnicallback.cc
+  rocksjni/filter.cc
+  rocksjni/ingest_external_file_options.cc
+  rocksjni/iterator.cc
+  rocksjni/jnicallback.cc
+  rocksjni/loggerjnicallback.cc
+  rocksjni/lru_cache.cc
+  rocksjni/memory_util.cc
+  rocksjni/memtablejni.cc
+  rocksjni/merge_operator.cc
+  rocksjni/native_comparator_wrapper_test.cc
+  rocksjni/optimistic_transaction_db.cc
+  rocksjni/optimistic_transaction_options.cc
+  rocksjni/options.cc
+  rocksjni/options_util.cc
+  rocksjni/persistent_cache.cc
+  rocksjni/ratelimiterjni.cc
+  rocksjni/remove_emptyvalue_compactionfilterjni.cc
+  rocksjni/restorejni.cc
+  rocksjni/rocks_callback_object.cc
+  rocksjni/rocksdb_exception_test.cc
+  rocksjni/rocksjni.cc
+  rocksjni/slice.cc
+  rocksjni/snapshot.cc
+  rocksjni/sst_file_manager.cc
+  rocksjni/sst_file_writerjni.cc
+  rocksjni/sst_file_readerjni.cc
+  rocksjni/sst_file_reader_iterator.cc
+  rocksjni/sst_partitioner.cc
+  rocksjni/statistics.cc
+  rocksjni/statisticsjni.cc
+  rocksjni/table.cc
+  rocksjni/table_filter.cc
+  rocksjni/table_filter_jnicallback.cc
+  rocksjni/testable_event_listener.cc
+  rocksjni/thread_status.cc
+  rocksjni/trace_writer.cc
+  rocksjni/trace_writer_jnicallback.cc
+  rocksjni/transaction.cc
+  rocksjni/transaction_db.cc
+  rocksjni/transaction_db_options.cc
+  rocksjni/transaction_log.cc
+  rocksjni/transaction_notifier.cc
+  rocksjni/transaction_notifier_jnicallback.cc
+  rocksjni/transaction_options.cc
+  rocksjni/ttl.cc
+  rocksjni/wal_filter.cc
+  rocksjni/wal_filter_jnicallback.cc
+  rocksjni/write_batch.cc
+  rocksjni/writebatchhandlerjnicallback.cc
+  rocksjni/write_batch_test.cc
+  rocksjni/write_batch_with_index.cc
+  rocksjni/write_buffer_manager.cc
+)
+
+set(JAVA_MAIN_CLASSES
+  src/main/java/org/rocksdb/AbstractCompactionFilter.java
+  src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
+  src/main/java/org/rocksdb/AbstractComparator.java
+  src/main/java/org/rocksdb/AbstractEventListener.java
+  src/main/java/org/rocksdb/AbstractImmutableNativeReference.java
+  src/main/java/org/rocksdb/AbstractMutableOptions.java
+  src/main/java/org/rocksdb/AbstractNativeReference.java
+  src/main/java/org/rocksdb/AbstractRocksIterator.java
+  src/main/java/org/rocksdb/AbstractSlice.java
+  src/main/java/org/rocksdb/AbstractTableFilter.java
src/main/java/org/rocksdb/AbstractTraceWriter.java + src/main/java/org/rocksdb/AbstractTransactionNotifier.java + src/main/java/org/rocksdb/AbstractWalFilter.java + src/main/java/org/rocksdb/AbstractWriteBatch.java + src/main/java/org/rocksdb/AccessHint.java + src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java + src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java + src/main/java/org/rocksdb/BackgroundErrorReason.java + src/main/java/org/rocksdb/BackupEngineOptions.java + src/main/java/org/rocksdb/BackupEngine.java + src/main/java/org/rocksdb/BackupInfo.java + src/main/java/org/rocksdb/BlockBasedTableConfig.java + src/main/java/org/rocksdb/BloomFilter.java + src/main/java/org/rocksdb/BuiltinComparator.java + src/main/java/org/rocksdb/ByteBufferGetStatus.java + src/main/java/org/rocksdb/Cache.java + src/main/java/org/rocksdb/CassandraCompactionFilter.java + src/main/java/org/rocksdb/CassandraValueMergeOperator.java + src/main/java/org/rocksdb/Checkpoint.java + src/main/java/org/rocksdb/ChecksumType.java + src/main/java/org/rocksdb/ClockCache.java + src/main/java/org/rocksdb/ColumnFamilyDescriptor.java + src/main/java/org/rocksdb/ColumnFamilyHandle.java + src/main/java/org/rocksdb/ColumnFamilyMetaData.java + src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java + src/main/java/org/rocksdb/ColumnFamilyOptions.java + src/main/java/org/rocksdb/CompactionJobInfo.java + src/main/java/org/rocksdb/CompactionJobStats.java + src/main/java/org/rocksdb/CompactionOptions.java + src/main/java/org/rocksdb/CompactionOptionsFIFO.java + src/main/java/org/rocksdb/CompactionOptionsUniversal.java + src/main/java/org/rocksdb/CompactionPriority.java + src/main/java/org/rocksdb/CompactionReason.java + src/main/java/org/rocksdb/CompactRangeOptions.java + src/main/java/org/rocksdb/CompactionStopStyle.java + src/main/java/org/rocksdb/CompactionStyle.java + src/main/java/org/rocksdb/ComparatorOptions.java + src/main/java/org/rocksdb/ComparatorType.java + src/main/java/org/rocksdb/CompressionOptions.java + src/main/java/org/rocksdb/CompressionType.java + src/main/java/org/rocksdb/ConfigOptions.java + src/main/java/org/rocksdb/DataBlockIndexType.java + src/main/java/org/rocksdb/DBOptionsInterface.java + src/main/java/org/rocksdb/DBOptions.java + src/main/java/org/rocksdb/DbPath.java + src/main/java/org/rocksdb/DirectSlice.java + src/main/java/org/rocksdb/EncodingType.java + src/main/java/org/rocksdb/Env.java + src/main/java/org/rocksdb/EnvOptions.java + src/main/java/org/rocksdb/EventListener.java + src/main/java/org/rocksdb/Experimental.java + src/main/java/org/rocksdb/ExternalFileIngestionInfo.java + src/main/java/org/rocksdb/Filter.java + src/main/java/org/rocksdb/FileOperationInfo.java + src/main/java/org/rocksdb/FlushJobInfo.java + src/main/java/org/rocksdb/FlushReason.java + src/main/java/org/rocksdb/FlushOptions.java + src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java + src/main/java/org/rocksdb/HashSkipListMemTableConfig.java + src/main/java/org/rocksdb/HistogramData.java + src/main/java/org/rocksdb/HistogramType.java + src/main/java/org/rocksdb/Holder.java + src/main/java/org/rocksdb/IndexShorteningMode.java + src/main/java/org/rocksdb/IndexType.java + src/main/java/org/rocksdb/InfoLogLevel.java + src/main/java/org/rocksdb/IngestExternalFileOptions.java + src/main/java/org/rocksdb/LevelMetaData.java + src/main/java/org/rocksdb/ConcurrentTaskLimiter.java + src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java + src/main/java/org/rocksdb/KeyMayExist.java + 
src/main/java/org/rocksdb/LiveFileMetaData.java + src/main/java/org/rocksdb/LogFile.java + src/main/java/org/rocksdb/Logger.java + src/main/java/org/rocksdb/LRUCache.java + src/main/java/org/rocksdb/MemoryUsageType.java + src/main/java/org/rocksdb/MemoryUtil.java + src/main/java/org/rocksdb/MemTableConfig.java + src/main/java/org/rocksdb/MemTableInfo.java + src/main/java/org/rocksdb/MergeOperator.java + src/main/java/org/rocksdb/MutableColumnFamilyOptions.java + src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java + src/main/java/org/rocksdb/MutableDBOptions.java + src/main/java/org/rocksdb/MutableDBOptionsInterface.java + src/main/java/org/rocksdb/MutableOptionKey.java + src/main/java/org/rocksdb/MutableOptionValue.java + src/main/java/org/rocksdb/NativeComparatorWrapper.java + src/main/java/org/rocksdb/NativeLibraryLoader.java + src/main/java/org/rocksdb/OperationStage.java + src/main/java/org/rocksdb/OperationType.java + src/main/java/org/rocksdb/OptimisticTransactionDB.java + src/main/java/org/rocksdb/OptimisticTransactionOptions.java + src/main/java/org/rocksdb/Options.java + src/main/java/org/rocksdb/OptionString.java + src/main/java/org/rocksdb/OptionsUtil.java + src/main/java/org/rocksdb/PersistentCache.java + src/main/java/org/rocksdb/PlainTableConfig.java + src/main/java/org/rocksdb/PrepopulateBlobCache.java + src/main/java/org/rocksdb/Priority.java + src/main/java/org/rocksdb/Range.java + src/main/java/org/rocksdb/RateLimiter.java + src/main/java/org/rocksdb/RateLimiterMode.java + src/main/java/org/rocksdb/ReadOptions.java + src/main/java/org/rocksdb/ReadTier.java + src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java + src/main/java/org/rocksdb/RestoreOptions.java + src/main/java/org/rocksdb/ReusedSynchronisationType.java + src/main/java/org/rocksdb/RocksCallbackObject.java + src/main/java/org/rocksdb/RocksDBException.java + src/main/java/org/rocksdb/RocksDB.java + src/main/java/org/rocksdb/RocksEnv.java + src/main/java/org/rocksdb/RocksIteratorInterface.java + src/main/java/org/rocksdb/RocksIterator.java + src/main/java/org/rocksdb/RocksMemEnv.java + src/main/java/org/rocksdb/RocksMutableObject.java + src/main/java/org/rocksdb/RocksObject.java + src/main/java/org/rocksdb/SanityLevel.java + src/main/java/org/rocksdb/SizeApproximationFlag.java + src/main/java/org/rocksdb/SkipListMemTableConfig.java + src/main/java/org/rocksdb/Slice.java + src/main/java/org/rocksdb/Snapshot.java + src/main/java/org/rocksdb/SstFileManager.java + src/main/java/org/rocksdb/SstFileMetaData.java + src/main/java/org/rocksdb/SstFileReader.java + src/main/java/org/rocksdb/SstFileReaderIterator.java + src/main/java/org/rocksdb/SstFileWriter.java + src/main/java/org/rocksdb/SstPartitionerFactory.java + src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java + src/main/java/org/rocksdb/StateType.java + src/main/java/org/rocksdb/StatisticsCollectorCallback.java + src/main/java/org/rocksdb/StatisticsCollector.java + src/main/java/org/rocksdb/Statistics.java + src/main/java/org/rocksdb/StatsCollectorInput.java + src/main/java/org/rocksdb/StatsLevel.java + src/main/java/org/rocksdb/Status.java + src/main/java/org/rocksdb/StringAppendOperator.java + src/main/java/org/rocksdb/TableFileCreationBriefInfo.java + src/main/java/org/rocksdb/TableFileCreationInfo.java + src/main/java/org/rocksdb/TableFileCreationReason.java + src/main/java/org/rocksdb/TableFileDeletionInfo.java + src/main/java/org/rocksdb/TableFilter.java + src/main/java/org/rocksdb/TableProperties.java + 
src/main/java/org/rocksdb/TableFormatConfig.java + src/main/java/org/rocksdb/ThreadType.java + src/main/java/org/rocksdb/ThreadStatus.java + src/main/java/org/rocksdb/TickerType.java + src/main/java/org/rocksdb/TimedEnv.java + src/main/java/org/rocksdb/TraceOptions.java + src/main/java/org/rocksdb/TraceWriter.java + src/main/java/org/rocksdb/TransactionalDB.java + src/main/java/org/rocksdb/TransactionalOptions.java + src/main/java/org/rocksdb/TransactionDB.java + src/main/java/org/rocksdb/TransactionDBOptions.java + src/main/java/org/rocksdb/Transaction.java + src/main/java/org/rocksdb/TransactionLogIterator.java + src/main/java/org/rocksdb/TransactionOptions.java + src/main/java/org/rocksdb/TtlDB.java + src/main/java/org/rocksdb/TxnDBWritePolicy.java + src/main/java/org/rocksdb/VectorMemTableConfig.java + src/main/java/org/rocksdb/WalFileType.java + src/main/java/org/rocksdb/WalFilter.java + src/main/java/org/rocksdb/WalProcessingOption.java + src/main/java/org/rocksdb/WALRecoveryMode.java + src/main/java/org/rocksdb/WBWIRocksIterator.java + src/main/java/org/rocksdb/WriteBatch.java + src/main/java/org/rocksdb/WriteBatchInterface.java + src/main/java/org/rocksdb/WriteBatchWithIndex.java + src/main/java/org/rocksdb/WriteOptions.java + src/main/java/org/rocksdb/WriteBufferManager.java + src/main/java/org/rocksdb/WriteStallCondition.java + src/main/java/org/rocksdb/WriteStallInfo.java + src/main/java/org/rocksdb/util/ByteUtil.java + src/main/java/org/rocksdb/util/BytewiseComparator.java + src/main/java/org/rocksdb/util/Environment.java + src/main/java/org/rocksdb/util/IntComparator.java + src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java + src/main/java/org/rocksdb/util/SizeUnit.java + src/main/java/org/rocksdb/UInt64AddOperator.java +) + +set(JAVA_TEST_CLASSES + src/test/java/org/rocksdb/BackupEngineTest.java + src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java + src/test/java/org/rocksdb/NativeComparatorWrapperTest.java + src/test/java/org/rocksdb/PlatformRandomHelper.java + src/test/java/org/rocksdb/RocksDBExceptionTest.java + src/test/java/org/rocksdb/RocksNativeLibraryResource.java + src/test/java/org/rocksdb/SnapshotTest.java + src/test/java/org/rocksdb/WriteBatchTest.java + src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java + src/test/java/org/rocksdb/util/WriteBatchGetter.java + src/test/java/org/rocksdb/test/TestableEventListener.java +) + +include(FindJava) +include(UseJava) +find_package(JNI) + +include_directories(${JNI_INCLUDE_DIRS}) +include_directories(${PROJECT_SOURCE_DIR}/java) + +set(JAVA_TEST_LIBDIR ${PROJECT_SOURCE_DIR}/java/test-libs) +set(JAVA_TMP_JAR ${JAVA_TEST_LIBDIR}/tmp.jar) +set(JAVA_JUNIT_JAR ${JAVA_TEST_LIBDIR}/junit-4.12.jar) +set(JAVA_HAMCR_JAR ${JAVA_TEST_LIBDIR}/hamcrest-core-1.3.jar) +set(JAVA_MOCKITO_JAR ${JAVA_TEST_LIBDIR}/mockito-all-1.10.19.jar) +set(JAVA_CGLIB_JAR ${JAVA_TEST_LIBDIR}/cglib-2.2.2.jar) +set(JAVA_ASSERTJ_JAR ${JAVA_TEST_LIBDIR}/assertj-core-1.7.1.jar) +set(JAVA_TESTCLASSPATH ${JAVA_JUNIT_JAR} ${JAVA_HAMCR_JAR} ${JAVA_MOCKITO_JAR} ${JAVA_CGLIB_JAR} ${JAVA_ASSERTJ_JAR}) + +set(JNI_OUTPUT_DIR ${PROJECT_SOURCE_DIR}/java/include) +file(MAKE_DIRECTORY ${JNI_OUTPUT_DIR}) + +if(${Java_VERSION_MINOR} VERSION_LESS_EQUAL "7" AND ${Java_VERSION_MAJOR} STREQUAL "1") + message(FATAL_ERROR "Detected Java 7 or older (${Java_VERSION_STRING}), minimum required version is now Java 8") +endif() +
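+# Note on JNI header generation: with CMake 3.11.4 or newer the headers are
+# produced directly by the add_jar(... GENERATE_NATIVE_HEADERS ...) call below,
+# while older CMake versions fall back to the separate create_javah() step at
+# the end of this file.
+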
+if(${Java_VERSION_MAJOR} VERSION_GREATER_EQUAL "10" AND ${CMAKE_VERSION} VERSION_LESS "3.11.4") + # Java 10 and newer don't have javah, but the alternative GENERATE_NATIVE_HEADERS requires CMake 3.11.4 or newer + message(FATAL_ERROR "Detected Java 10 or newer (${Java_VERSION_STRING}), to build with CMake please upgrade CMake to 3.11.4 or newer") + +elseif(${CMAKE_VERSION} VERSION_LESS "3.11.4") + # Old CMake + message("Using an old CMake (${CMAKE_VERSION}) - JNI headers generated in separate step") + add_jar( + rocksdbjni_classes + SOURCES + ${JAVA_MAIN_CLASSES} + ${JAVA_TEST_CLASSES} + INCLUDE_JARS ${JAVA_TESTCLASSPATH} + ) + +else () + # Java 1.8 or newer: prepare the JAR... + message("Preparing Jar for JDK ${Java_VERSION_STRING}") + add_jar( + rocksdbjni_classes + SOURCES + ${JAVA_MAIN_CLASSES} + ${JAVA_TEST_CLASSES} + INCLUDE_JARS ${JAVA_TESTCLASSPATH} + GENERATE_NATIVE_HEADERS rocksdbjni_headers DESTINATION ${JNI_OUTPUT_DIR} + ) + +endif() + +if(NOT EXISTS ${PROJECT_SOURCE_DIR}/java/classes) + file(MAKE_DIRECTORY ${PROJECT_SOURCE_DIR}/java/classes) +endif() + +if(NOT EXISTS ${JAVA_TEST_LIBDIR}) + file(MAKE_DIRECTORY ${JAVA_TEST_LIBDIR}) +endif() + +if (DEFINED CUSTOM_DEPS_URL) + set(DEPS_URL ${CUSTOM_DEPS_URL}/) +else () + # Using a Facebook AWS account for S3 storage. (maven.org has a history + # of failing in Travis builds.) + set(DEPS_URL "https://rocksdb-deps.s3-us-west-2.amazonaws.com/jars") +endif() + +if(NOT EXISTS ${JAVA_JUNIT_JAR}) + message("Downloading ${JAVA_JUNIT_JAR}") + file(DOWNLOAD ${DEPS_URL}/junit-4.12.jar ${JAVA_TMP_JAR} STATUS downloadStatus) + list(GET downloadStatus 0 error_code) + list(GET downloadStatus 1 error_message) + if(NOT error_code EQUAL 0) + message(FATAL_ERROR "Failed downloading ${JAVA_JUNIT_JAR}: ${error_message}") + endif() + file(RENAME ${JAVA_TMP_JAR} ${JAVA_JUNIT_JAR}) +endif() +if(NOT EXISTS ${JAVA_HAMCR_JAR}) + message("Downloading ${JAVA_HAMCR_JAR}") + file(DOWNLOAD ${DEPS_URL}/hamcrest-core-1.3.jar ${JAVA_TMP_JAR} STATUS downloadStatus) + list(GET downloadStatus 0 error_code) + list(GET downloadStatus 1 error_message) + if(NOT error_code EQUAL 0) + message(FATAL_ERROR "Failed downloading ${JAVA_HAMCR_JAR}: ${error_message}") + endif() + file(RENAME ${JAVA_TMP_JAR} ${JAVA_HAMCR_JAR}) +endif() +if(NOT EXISTS ${JAVA_MOCKITO_JAR}) + message("Downloading ${JAVA_MOCKITO_JAR}") + file(DOWNLOAD ${DEPS_URL}/mockito-all-1.10.19.jar ${JAVA_TMP_JAR} STATUS downloadStatus) + list(GET downloadStatus 0 error_code) + list(GET downloadStatus 1 error_message) + if(NOT error_code EQUAL 0) + message(FATAL_ERROR "Failed downloading ${JAVA_MOCKITO_JAR}: ${error_message}") + endif() + file(RENAME ${JAVA_TMP_JAR} ${JAVA_MOCKITO_JAR}) +endif() +if(NOT EXISTS ${JAVA_CGLIB_JAR}) + message("Downloading ${JAVA_CGLIB_JAR}") + file(DOWNLOAD ${DEPS_URL}/cglib-2.2.2.jar ${JAVA_TMP_JAR} STATUS downloadStatus) + list(GET downloadStatus 0 error_code) + list(GET downloadStatus 1 error_message) + if(NOT error_code EQUAL 0) + message(FATAL_ERROR "Failed downloading ${JAVA_CGLIB_JAR}: ${error_message}") + endif() + file(RENAME ${JAVA_TMP_JAR} ${JAVA_CGLIB_JAR}) +endif() +if(NOT EXISTS ${JAVA_ASSERTJ_JAR}) + message("Downloading ${JAVA_ASSERTJ_JAR}") + file(DOWNLOAD ${DEPS_URL}/assertj-core-1.7.1.jar ${JAVA_TMP_JAR} STATUS downloadStatus) + list(GET downloadStatus 0 error_code) + list(GET downloadStatus 1 error_message) + if(NOT error_code EQUAL 0) + message(FATAL_ERROR "Failed downloading ${JAVA_ASSERTJ_JAR}: ${error_message}") + endif() + file(RENAME ${JAVA_TMP_JAR} ${JAVA_ASSERTJ_JAR}) +endif() + +if(${CMAKE_VERSION} VERSION_LESS "3.11.4") + # With old CMake we ONLY generate the JNI headers here; otherwise JNI header generation is 
handled in add_jar step above + message("Preparing JNI headers for old CMake (${CMAKE_VERSION})") + set(NATIVE_JAVA_CLASSES + org.rocksdb.AbstractCompactionFilter + org.rocksdb.AbstractCompactionFilterFactory + org.rocksdb.AbstractComparator + org.rocksdb.AbstractEventListener + org.rocksdb.AbstractImmutableNativeReference + org.rocksdb.AbstractNativeReference + org.rocksdb.AbstractRocksIterator + org.rocksdb.AbstractSlice + org.rocksdb.AbstractTableFilter + org.rocksdb.AbstractTraceWriter + org.rocksdb.AbstractTransactionNotifier + org.rocksdb.AbstractWalFilter + org.rocksdb.BackupEngineOptions + org.rocksdb.BackupEngine + org.rocksdb.BlockBasedTableConfig + org.rocksdb.BloomFilter + org.rocksdb.CassandraCompactionFilter + org.rocksdb.CassandraValueMergeOperator + org.rocksdb.Checkpoint + org.rocksdb.ClockCache + org.rocksdb.Cache + org.rocksdb.ColumnFamilyHandle + org.rocksdb.ColumnFamilyOptions + org.rocksdb.CompactionJobInfo + org.rocksdb.CompactionJobStats + org.rocksdb.CompactionOptions + org.rocksdb.CompactionOptionsFIFO + org.rocksdb.CompactionOptionsUniversal + org.rocksdb.CompactRangeOptions + org.rocksdb.ComparatorOptions + org.rocksdb.CompressionOptions + org.rocksdb.ConcurrentTaskLimiterImpl + org.rocksdb.ConfigOptions + org.rocksdb.DBOptions + org.rocksdb.DirectSlice + org.rocksdb.Env + org.rocksdb.EnvOptions + org.rocksdb.Filter + org.rocksdb.FlushOptions + org.rocksdb.HashLinkedListMemTableConfig + org.rocksdb.HashSkipListMemTableConfig + org.rocksdb.IngestExternalFileOptions + org.rocksdb.Logger + org.rocksdb.LRUCache + org.rocksdb.MemoryUtil + org.rocksdb.MemTableConfig + org.rocksdb.NativeComparatorWrapper + org.rocksdb.NativeLibraryLoader + org.rocksdb.OptimisticTransactionDB + org.rocksdb.OptimisticTransactionOptions + org.rocksdb.Options + org.rocksdb.OptionsUtil + org.rocksdb.PersistentCache + org.rocksdb.PlainTableConfig + org.rocksdb.RateLimiter + org.rocksdb.ReadOptions + org.rocksdb.RemoveEmptyValueCompactionFilter + org.rocksdb.RestoreOptions + org.rocksdb.RocksCallbackObject + org.rocksdb.RocksDB + org.rocksdb.RocksEnv + org.rocksdb.RocksIterator + org.rocksdb.RocksIteratorInterface + org.rocksdb.RocksMemEnv + org.rocksdb.RocksMutableObject + org.rocksdb.RocksObject + org.rocksdb.SkipListMemTableConfig + org.rocksdb.Slice + org.rocksdb.Snapshot + org.rocksdb.SstFileManager + org.rocksdb.SstFileWriter + org.rocksdb.SstFileReader + org.rocksdb.SstFileReaderIterator + org.rocksdb.SstPartitionerFactory + org.rocksdb.SstPartitionerFixedPrefixFactory + org.rocksdb.Statistics + org.rocksdb.StringAppendOperator + org.rocksdb.TableFormatConfig + org.rocksdb.ThreadStatus + org.rocksdb.TimedEnv + org.rocksdb.Transaction + org.rocksdb.TransactionDB + org.rocksdb.TransactionDBOptions + org.rocksdb.TransactionLogIterator + org.rocksdb.TransactionOptions + org.rocksdb.TtlDB + org.rocksdb.UInt64AddOperator + org.rocksdb.VectorMemTableConfig + org.rocksdb.WBWIRocksIterator + org.rocksdb.WriteBatch + org.rocksdb.WriteBatch.Handler + org.rocksdb.WriteBatchInterface + org.rocksdb.WriteBatchWithIndex + org.rocksdb.WriteOptions + org.rocksdb.NativeComparatorWrapperTest + org.rocksdb.RocksDBExceptionTest + org.rocksdb.SnapshotTest + org.rocksdb.WriteBatchTest + org.rocksdb.WriteBatchTestInternalHelper + org.rocksdb.WriteBufferManager + org.rocksdb.test.TestableEventListener + ) + + create_javah( + TARGET rocksdbjni_headers + CLASSES ${NATIVE_JAVA_CLASSES} + CLASSPATH rocksdbjni_classes ${JAVA_TESTCLASSPATH} + OUTPUT_DIR ${JNI_OUTPUT_DIR} + ) +endif() + +if(NOT MSVC) + 
set_property(TARGET ${ROCKSDB_STATIC_LIB} PROPERTY POSITION_INDEPENDENT_CODE ON) +endif() + +set(ROCKSDBJNI_STATIC_LIB rocksdbjni${ARTIFACT_SUFFIX}) +add_library(${ROCKSDBJNI_STATIC_LIB} ${JNI_NATIVE_SOURCES}) +add_dependencies(${ROCKSDBJNI_STATIC_LIB} rocksdbjni_headers) +target_link_libraries(${ROCKSDBJNI_STATIC_LIB} ${ROCKSDB_STATIC_LIB} ${ROCKSDB_LIB}) + +if(NOT MINGW) + set(ROCKSDBJNI_SHARED_LIB rocksdbjni-shared${ARTIFACT_SUFFIX}) + add_library(${ROCKSDBJNI_SHARED_LIB} SHARED ${JNI_NATIVE_SOURCES}) + add_dependencies(${ROCKSDBJNI_SHARED_LIB} rocksdbjni_headers) + target_link_libraries(${ROCKSDBJNI_SHARED_LIB} ${ROCKSDB_STATIC_LIB} ${ROCKSDB_LIB}) + + set_target_properties( + ${ROCKSDBJNI_SHARED_LIB} + PROPERTIES + COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_CFG_INTDIR} + COMPILE_PDB_NAME ${ROCKSDBJNI_STATIC_LIB}.pdb + ) +endif() diff --git a/src/rocksdb/java/HISTORY-JAVA.md b/src/rocksdb/java/HISTORY-JAVA.md new file mode 100644 index 000000000..731886a61 --- /dev/null +++ b/src/rocksdb/java/HISTORY-JAVA.md @@ -0,0 +1,86 @@ +# RocksJava Change Log + +## 3.13 (8/4/2015) +### New Features +* Exposed BackupEngine API. +* Added CappedPrefixExtractor support. To use such an extractor, simply call useCappedPrefixExtractor in either Options or ColumnFamilyOptions. +* Added RemoveEmptyValueCompactionFilter. + +## 3.10.0 (3/24/2015) +### New Features +* Added compression per level API. +* MemEnv is now available in RocksJava via RocksMemEnv class. +* lz4 compression is now included in the rocksjava static library when running `make rocksdbjavastatic`. + +### Public API Changes +* Overflowing a size_t when setting rocksdb options now throws an IllegalArgumentException, which removes the need for developers to catch these Exceptions explicitly. +* The set and get functions for tableCacheRemoveScanCountLimit are deprecated. + + +## By 01/31/2015 +### New Features +* WriteBatchWithIndex support. +* Iterator support for WriteBatch and WriteBatchWithIndex. +* GetUpdatesSince support. +* Snapshots now carry information about the related sequence number. +* TTL DB support. + +## By 11/14/2014 +### New Features +* Full support for Column Family. +* Slice and Comparator support. +* Default merge operator support. +* RateLimiter support. + +## By 06/15/2014 +### New Features +* Added basic Java binding for rocksdb::Env such that multiple RocksDB instances can share the same thread pool and environment. +* Added RestoreBackupableDB. + +## By 05/30/2014 +### Internal Framework Improvement +* Added disOwnNativeHandle to RocksObject, which allows a RocksObject to give up the ownership of its native handle. This method is useful when sharing and transferring the ownership of RocksDB C++ resources. + +## By 05/15/2014 +### New Features +* Added RocksObject --- the base class of all RocksDB classes which holds some RocksDB resources on the C++ side. +* Use the environment variable JAVA_HOME in the Makefile for RocksJava. +### Public API Changes +* Renamed org.rocksdb.Iterator to org.rocksdb.RocksIterator to avoid potential conflict with the Java built-in Iterator. + +## By 04/30/2014 +### New Features +* Added Java binding for MultiGet. +* Added static method RocksDB.loadLibrary(), which loads necessary library files. +* Added Java bindings for 60+ rocksdb::Options. +* Added Java binding for BloomFilter. +* Added Java binding for ReadOptions. +* Added Java binding for memtables. +* Added Java binding for sst formats. +* Added Java binding for RocksDB Iterator, which enables sequential scans. 
+* Added Java binding for Statistics. +* Added Java binding for BackupableDB. + +### DB Benchmark +* Added filluniquerandom, readseq benchmarks. +* 70+ command-line options. +* Enabled BloomFilter configuration. + +## By 04/15/2014 +### New Features +* Added Java binding for WriteOptions. +* Added Java binding for WriteBatch, which enables batch-write. +* Added Java binding for rocksdb::Options. +* Added Java binding for block cache. +* Added Java version of DB Benchmark. + +### DB Benchmark +* Added readwhilewriting benchmark. + +### Internal Framework Improvement +* Avoid a potential byte-array-copy between C++ and Java in RocksDB.get. +* Added SizeUnit in org.rocksdb.util to store constants like KB and GB. + +### 03/28/2014 +* RocksJava project started. +* Added Java binding for RocksDB, which supports Open, Close, Get and Put. diff --git a/src/rocksdb/java/Makefile b/src/rocksdb/java/Makefile new file mode 100644 index 000000000..bc7e121c4 --- /dev/null +++ b/src/rocksdb/java/Makefile @@ -0,0 +1,452 @@ +NATIVE_JAVA_CLASSES = \ + org.rocksdb.AbstractCompactionFilter\ + org.rocksdb.AbstractCompactionFilterFactory\ + org.rocksdb.AbstractComparator\ + org.rocksdb.AbstractEventListener\ + org.rocksdb.AbstractSlice\ + org.rocksdb.AbstractTableFilter\ + org.rocksdb.AbstractTraceWriter\ + org.rocksdb.AbstractTransactionNotifier\ + org.rocksdb.AbstractWalFilter\ + org.rocksdb.BackupEngine\ + org.rocksdb.BackupEngineOptions\ + org.rocksdb.BlockBasedTableConfig\ + org.rocksdb.BloomFilter\ + org.rocksdb.Checkpoint\ + org.rocksdb.ClockCache\ + org.rocksdb.Cache\ + org.rocksdb.CassandraCompactionFilter\ + org.rocksdb.CassandraValueMergeOperator\ + org.rocksdb.ColumnFamilyHandle\ + org.rocksdb.ColumnFamilyOptions\ + org.rocksdb.CompactionJobInfo\ + org.rocksdb.CompactionJobStats\ + org.rocksdb.CompactionOptions\ + org.rocksdb.CompactionOptionsFIFO\ + org.rocksdb.CompactionOptionsUniversal\ + org.rocksdb.CompactRangeOptions\ + org.rocksdb.ComparatorOptions\ + org.rocksdb.CompressionOptions\ + org.rocksdb.ConfigOptions\ + org.rocksdb.DBOptions\ + org.rocksdb.DirectSlice\ + org.rocksdb.Env\ + org.rocksdb.EnvOptions\ + org.rocksdb.FlushOptions\ + org.rocksdb.Filter\ + org.rocksdb.IngestExternalFileOptions\ + org.rocksdb.HashLinkedListMemTableConfig\ + org.rocksdb.HashSkipListMemTableConfig\ + org.rocksdb.ConcurrentTaskLimiter\ + org.rocksdb.ConcurrentTaskLimiterImpl\ + org.rocksdb.KeyMayExist\ + org.rocksdb.Logger\ + org.rocksdb.LRUCache\ + org.rocksdb.MemoryUsageType\ + org.rocksdb.MemoryUtil\ + org.rocksdb.MergeOperator\ + org.rocksdb.NativeComparatorWrapper\ + org.rocksdb.OptimisticTransactionDB\ + org.rocksdb.OptimisticTransactionOptions\ + org.rocksdb.Options\ + org.rocksdb.OptionsUtil\ + org.rocksdb.PersistentCache\ + org.rocksdb.PlainTableConfig\ + org.rocksdb.RateLimiter\ + org.rocksdb.ReadOptions\ + org.rocksdb.RemoveEmptyValueCompactionFilter\ + org.rocksdb.RestoreOptions\ + org.rocksdb.RocksCallbackObject\ + org.rocksdb.RocksDB\ + org.rocksdb.RocksEnv\ + org.rocksdb.RocksIterator\ + org.rocksdb.RocksMemEnv\ + org.rocksdb.SkipListMemTableConfig\ + org.rocksdb.Slice\ + org.rocksdb.SstFileManager\ + org.rocksdb.SstFileWriter\ + org.rocksdb.SstFileReader\ + org.rocksdb.SstFileReaderIterator\ + org.rocksdb.SstPartitionerFactory\ + org.rocksdb.SstPartitionerFixedPrefixFactory\ + org.rocksdb.Statistics\ + org.rocksdb.ThreadStatus\ + org.rocksdb.TimedEnv\ + org.rocksdb.Transaction\ + org.rocksdb.TransactionDB\ + org.rocksdb.TransactionDBOptions\ + org.rocksdb.TransactionOptions\ + 
org.rocksdb.TransactionLogIterator\ + org.rocksdb.TtlDB\ + org.rocksdb.VectorMemTableConfig\ + org.rocksdb.Snapshot\ + org.rocksdb.StringAppendOperator\ + org.rocksdb.UInt64AddOperator\ + org.rocksdb.WriteBatch\ + org.rocksdb.WriteBatch.Handler\ + org.rocksdb.WriteOptions\ + org.rocksdb.WriteBatchWithIndex\ + org.rocksdb.WriteBufferManager\ + org.rocksdb.WBWIRocksIterator + +NATIVE_JAVA_TEST_CLASSES = \ + org.rocksdb.RocksDBExceptionTest\ + org.rocksdb.test.TestableEventListener\ + org.rocksdb.NativeComparatorWrapperTest.NativeStringComparatorWrapper\ + org.rocksdb.WriteBatchTest\ + org.rocksdb.WriteBatchTestInternalHelper + +ROCKSDB_MAJOR = $(shell grep -E "ROCKSDB_MAJOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3) +ROCKSDB_MINOR = $(shell grep -E "ROCKSDB_MINOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3) +ROCKSDB_PATCH = $(shell grep -E "ROCKSDB_PATCH.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3) + +NATIVE_INCLUDE = ./include +ARCH := $(shell getconf LONG_BIT) +SHA256_CMD ?= sha256sum + +JAVA_TESTS = \ + org.rocksdb.BackupEngineOptionsTest\ + org.rocksdb.BackupEngineTest\ + org.rocksdb.BlobOptionsTest\ + org.rocksdb.BlockBasedTableConfigTest\ + org.rocksdb.BuiltinComparatorTest\ + org.rocksdb.BytewiseComparatorRegressionTest\ + org.rocksdb.util.BytewiseComparatorTest\ + org.rocksdb.util.BytewiseComparatorIntTest\ + org.rocksdb.CheckPointTest\ + org.rocksdb.ClockCacheTest\ + org.rocksdb.ColumnFamilyOptionsTest\ + org.rocksdb.ColumnFamilyTest\ + org.rocksdb.CompactionFilterFactoryTest\ + org.rocksdb.CompactionJobInfoTest\ + org.rocksdb.CompactionJobStatsTest\ + org.rocksdb.CompactionOptionsTest\ + org.rocksdb.CompactionOptionsFIFOTest\ + org.rocksdb.CompactionOptionsUniversalTest\ + org.rocksdb.CompactionPriorityTest\ + org.rocksdb.CompactionStopStyleTest\ + org.rocksdb.ComparatorOptionsTest\ + org.rocksdb.CompressionOptionsTest\ + org.rocksdb.CompressionTypesTest\ + org.rocksdb.DBOptionsTest\ + org.rocksdb.DirectSliceTest\ + org.rocksdb.util.EnvironmentTest\ + org.rocksdb.EnvOptionsTest\ + org.rocksdb.EventListenerTest\ + org.rocksdb.IngestExternalFileOptionsTest\ + org.rocksdb.util.IntComparatorTest\ + org.rocksdb.util.JNIComparatorTest\ + org.rocksdb.FilterTest\ + org.rocksdb.FlushTest\ + org.rocksdb.InfoLogLevelTest\ + org.rocksdb.KeyMayExistTest\ + org.rocksdb.ConcurrentTaskLimiterTest\ + org.rocksdb.LoggerTest\ + org.rocksdb.LRUCacheTest\ + org.rocksdb.MemoryUtilTest\ + org.rocksdb.MemTableTest\ + org.rocksdb.MergeTest\ + org.rocksdb.MultiColumnRegressionTest \ + org.rocksdb.MultiGetManyKeysTest\ + org.rocksdb.MultiGetTest\ + org.rocksdb.MixedOptionsTest\ + org.rocksdb.MutableColumnFamilyOptionsTest\ + org.rocksdb.MutableDBOptionsTest\ + org.rocksdb.MutableOptionsGetSetTest \ + org.rocksdb.NativeComparatorWrapperTest\ + org.rocksdb.NativeLibraryLoaderTest\ + org.rocksdb.OptimisticTransactionTest\ + org.rocksdb.OptimisticTransactionDBTest\ + org.rocksdb.OptimisticTransactionOptionsTest\ + org.rocksdb.OptionsUtilTest\ + org.rocksdb.OptionsTest\ + org.rocksdb.PlainTableConfigTest\ + org.rocksdb.RateLimiterTest\ + org.rocksdb.ReadOnlyTest\ + org.rocksdb.ReadOptionsTest\ + org.rocksdb.util.ReverseBytewiseComparatorIntTest\ + org.rocksdb.RocksDBTest\ + org.rocksdb.RocksDBExceptionTest\ + org.rocksdb.DefaultEnvTest\ + org.rocksdb.RocksIteratorTest\ + org.rocksdb.RocksMemEnvTest\ + org.rocksdb.util.SizeUnitTest\ + org.rocksdb.SecondaryDBTest\ + org.rocksdb.SliceTest\ + org.rocksdb.SnapshotTest\ + org.rocksdb.SstFileManagerTest\ + org.rocksdb.SstFileWriterTest\ + 
org.rocksdb.SstFileReaderTest\ + org.rocksdb.SstPartitionerTest\ + org.rocksdb.TableFilterTest\ + org.rocksdb.TimedEnvTest\ + org.rocksdb.TransactionTest\ + org.rocksdb.TransactionDBTest\ + org.rocksdb.TransactionOptionsTest\ + org.rocksdb.TransactionDBOptionsTest\ + org.rocksdb.TransactionLogIteratorTest\ + org.rocksdb.TtlDBTest\ + org.rocksdb.StatisticsTest\ + org.rocksdb.StatisticsCollectorTest\ + org.rocksdb.VerifyChecksumsTest\ + org.rocksdb.WalFilterTest\ + org.rocksdb.WALRecoveryModeTest\ + org.rocksdb.WriteBatchHandlerTest\ + org.rocksdb.WriteBatchTest\ + org.rocksdb.WriteBatchThreadedTest\ + org.rocksdb.WriteOptionsTest\ + org.rocksdb.WriteBatchWithIndexTest + +MAIN_SRC = src/main/java +TEST_SRC = src/test/java +OUTPUT = target +MAIN_CLASSES = $(OUTPUT)/classes +TEST_CLASSES = $(OUTPUT)/test-classes +JAVADOC = $(OUTPUT)/apidocs + +BENCHMARK_MAIN_SRC = benchmark/src/main/java +BENCHMARK_OUTPUT = benchmark/target +BENCHMARK_MAIN_CLASSES = $(BENCHMARK_OUTPUT)/classes + +SAMPLES_MAIN_SRC = samples/src/main/java +SAMPLES_OUTPUT = samples/target +SAMPLES_MAIN_CLASSES = $(SAMPLES_OUTPUT)/classes + +JAVA_TEST_LIBDIR = test-libs +JAVA_JUNIT_VER = 4.13.1 +JAVA_JUNIT_SHA256 = c30719db974d6452793fe191b3638a5777005485bae145924044530ffa5f6122 +JAVA_JUNIT_JAR = junit-$(JAVA_JUNIT_VER).jar +JAVA_JUNIT_JAR_PATH = $(JAVA_TEST_LIBDIR)/$(JAVA_JUNIT_JAR) +JAVA_HAMCREST_VER = 2.2 +JAVA_HAMCREST_SHA256 = 5e62846a89f05cd78cd9c1a553f340d002458380c320455dd1f8fc5497a8a1c1 +JAVA_HAMCREST_JAR = hamcrest-$(JAVA_HAMCREST_VER).jar +JAVA_HAMCREST_JAR_PATH = $(JAVA_TEST_LIBDIR)/$(JAVA_HAMCREST_JAR) +JAVA_MOCKITO_VER = 1.10.19 +JAVA_MOCKITO_SHA256 = d1a7a7ef14b3db5c0fc3e0a63a81b374b510afe85add9f7984b97911f4c70605 +JAVA_MOCKITO_JAR = mockito-all-$(JAVA_MOCKITO_VER).jar +JAVA_MOCKITO_JAR_PATH = $(JAVA_TEST_LIBDIR)/$(JAVA_MOCKITO_JAR) +JAVA_CGLIB_VER = 3.3.0 +JAVA_CGLIB_SHA256 = 9fe0c26d7464140ccdfe019ac687be1fb906122b508ab54beb810db0f09a9212 +JAVA_CGLIB_JAR = cglib-$(JAVA_CGLIB_VER).jar +JAVA_CGLIB_JAR_PATH = $(JAVA_TEST_LIBDIR)/$(JAVA_CGLIB_JAR) +JAVA_ASSERTJ_VER = 2.9.0 +JAVA_ASSERTJ_SHA256 = 5e88ea3ecbe3c48aa1346fec76c84979fa9c8d22499f11479011691230e8babf +JAVA_ASSERTJ_JAR = assertj-core-$(JAVA_ASSERTJ_VER).jar +JAVA_ASSERTJ_JAR_PATH = $(JAVA_TEST_LIBDIR)/$(JAVA_ASSERTJ_JAR) +JAVA_TESTCLASSPATH = $(JAVA_JUNIT_JAR_PATH):$(JAVA_HAMCREST_JAR_PATH):$(JAVA_MOCKITO_JAR_PATH):$(JAVA_CGLIB_JAR_PATH):$(JAVA_ASSERTJ_JAR_PATH) + +MVN_LOCAL = ~/.m2/repository + +# Set the path of the java commands +ifeq ($(JAVA_CMD),) +ifneq ($(JAVA_HOME),) +JAVA_CMD := $(JAVA_HOME)/bin/java +else +JAVA_CMD := java +endif +endif + +ifeq ($(JAVAC_CMD),) +ifneq ($(JAVA_HOME),) +JAVAC_CMD := $(JAVA_HOME)/bin/javac +else +JAVAC_CMD := javac +endif +endif + +ifeq ($(JAVADOC_CMD),) +ifneq ($(JAVA_HOME),) +JAVADOC_CMD := $(JAVA_HOME)/bin/javadoc +else +JAVADOC_CMD := javadoc +endif +endif + +# Look for the Java version (1.6->6, 1.7->7, 1.8->8, 11.0->11, 13.0->13, 15.0->15 etc..) 
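+# e.g. "javac 1.8.0_252" dot-splits to "javac 1 8 0_252": the second word is
+# "1", so the third word ("8") is taken as the major version, while
+# "javac 11.0.8" dot-splits to "javac 11 0 8" and the second word ("11") is
+# used directly. (The version strings above are illustrative examples.)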
+JAVAC_VERSION := $(shell $(JAVAC_CMD) -version 2>&1) +JAVAC_MAJOR_VERSION := $(word 2,$(subst ., ,$(JAVAC_VERSION))) +ifeq ($(JAVAC_MAJOR_VERSION),1) +JAVAC_MAJOR_VERSION := $(word 3,$(subst ., ,$(JAVAC_VERSION))) +endif + +# Test whether the version we see meets our minimum +MIN_JAVAC_MAJOR_VERSION := 8 +JAVAC_VERSION_GE_MIN := $(shell [ $(JAVAC_MAJOR_VERSION) -ge $(MIN_JAVAC_MAJOR_VERSION) ] > /dev/null 2>&1 && echo true) + +# Set the default JAVA_ARGS to "" for DEBUG_LEVEL=0 +JAVA_ARGS ?= + +JAVAC_ARGS ?= + +# Read plugin configuration +PLUGIN_PATH = ../plugin +ROCKSDB_PLUGIN_MKS = $(foreach plugin, $(ROCKSDB_PLUGINS), $(PLUGIN_PATH)/$(plugin)/*.mk) +include $(ROCKSDB_PLUGIN_MKS) + +# Add paths to Java sources in plugins +ROCKSDB_PLUGIN_JAVA_ROOTS = $(foreach plugin, $(ROCKSDB_PLUGINS), $(PLUGIN_PATH)/$(plugin)/java) +PLUGIN_SOURCES = $(foreach root, $(ROCKSDB_PLUGIN_JAVA_ROOTS), $(foreach pkg, org/rocksdb/util org/rocksdb, $(root)/$(MAIN_SRC)/$(pkg)/*.java)) +CORE_SOURCES = $(foreach pkg, org/rocksdb/util org/rocksdb, $(MAIN_SRC)/$(pkg)/*.java) +SOURCES = $(wildcard $(CORE_SOURCES) $(PLUGIN_SOURCES)) +PLUGIN_TEST_SOURCES = $(foreach root, $(ROCKSDB_PLUGIN_JAVA_ROOTS), $(foreach pkg, org/rocksdb/test org/rocksdb/util org/rocksdb, $(root)/$(TEST_SRC)/$(pkg)/*.java)) +CORE_TEST_SOURCES = $(foreach pkg, org/rocksdb/test org/rocksdb/util org/rocksdb, $(TEST_SRC)/$(pkg)/*.java) +TEST_SOURCES = $(wildcard $(CORE_TEST_SOURCES) $(PLUGIN_TEST_SOURCES)) + +# Configure the plugin tests and java classes +ROCKSDB_PLUGIN_NATIVE_JAVA_CLASSES = $(foreach plugin, $(ROCKSDB_PLUGINS), $(foreach class, $($(plugin)_NATIVE_JAVA_CLASSES), $(class))) +NATIVE_JAVA_CLASSES = $(NATIVE_JAVA_CLASSES) $(ROCKSDB_PLUGIN_NATIVE_JAVA_CLASSES) +ROCKSDB_PLUGIN_JAVA_TESTS = $(foreach plugin, $(ROCKSDB_PLUGINS), $(foreach testclass, $($(plugin)_JAVA_TESTS), $(testclass))) +ALL_JAVA_TESTS = $(JAVA_TESTS) $(ROCKSDB_PLUGIN_JAVA_TESTS) + +# When debugging add -Xcheck:jni to the java args +ifneq ($(DEBUG_LEVEL),0) + JAVA_ARGS += -ea -Xcheck:jni + JAVAC_ARGS += -Xlint:deprecation -Xlint:unchecked +endif + +# Using a Facebook AWS account for S3 storage. (maven.org has a history +# of failing in Travis builds.) 
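+# DEPS_URL is assigned with ?= below, so it can be overridden from the make
+# command line, e.g. (with a hypothetical mirror):
+#   make DEPS_URL=https://my-mirror.example.com/jars resolve_test_deps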
+DEPS_URL?=https://rocksdb-deps.s3-us-west-2.amazonaws.com/jars + +java-version: +ifneq ($(JAVAC_VERSION_GE_MIN),true) + echo 'Java version is $(JAVAC_VERSION), minimum required version is $(MIN_JAVAC_MAJOR_VERSION)' + exit 1 +endif + +clean: clean-not-downloaded clean-downloaded + +clean-not-downloaded: + $(AM_V_at)rm -rf $(NATIVE_INCLUDE) + $(AM_V_at)rm -rf $(OUTPUT) + $(AM_V_at)rm -rf $(BENCHMARK_OUTPUT) + $(AM_V_at)rm -rf $(SAMPLES_OUTPUT) + +clean-downloaded: + $(AM_V_at)rm -rf $(JAVA_TEST_LIBDIR) + + +javadocs: java + $(AM_V_GEN)mkdir -p $(JAVADOC) + $(AM_V_at)$(JAVADOC_CMD) -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org + +javalib: java java_test javadocs + +java: java-version + $(AM_V_GEN)mkdir -p $(MAIN_CLASSES) + $(AM_V_at) $(JAVAC_CMD) $(JAVAC_ARGS) -h $(NATIVE_INCLUDE) -d $(MAIN_CLASSES) $(SOURCES) + $(AM_V_at)@cp ../HISTORY.md ./HISTORY-CPP.md + $(AM_V_at)@rm -f ./HISTORY-CPP.md + +sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)$(JAVAC_CMD) $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBSample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found + $(JAVA_CMD) $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found + +column_family_sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)$(JAVAC_CMD) $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBColumnFamilySample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + $(JAVA_CMD) $(JAVA_ARGS) -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + +transaction_sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)$(JAVAC_CMD) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/TransactionSample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + $(JAVA_CMD) -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) TransactionSample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + +optimistic_transaction_sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)$(JAVAC_CMD) -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/OptimisticTransactionSample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + $(JAVA_CMD) -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) OptimisticTransactionSample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + +$(JAVA_TEST_LIBDIR): + mkdir -p "$(JAVA_TEST_LIBDIR)" + +$(JAVA_JUNIT_JAR_PATH): $(JAVA_TEST_LIBDIR) +ifneq (,$(wildcard $(MVN_LOCAL)/junit/junit/$(JAVA_JUNIT_VER)/$(JAVA_JUNIT_JAR))) + cp -v $(MVN_LOCAL)/junit/junit/$(JAVA_JUNIT_VER)/$(JAVA_JUNIT_JAR) $(JAVA_TEST_LIBDIR) +else + curl --fail --insecure --output $(JAVA_JUNIT_JAR_PATH) --location $(DEPS_URL)/$(JAVA_JUNIT_JAR) + JAVA_JUNIT_SHA256_ACTUAL=`$(SHA256_CMD) $(JAVA_JUNIT_JAR_PATH) | cut -d ' ' -f 1`; \ + if [ "$(JAVA_JUNIT_SHA256)" != "$$JAVA_JUNIT_SHA256_ACTUAL" ]; then \ + echo $(JAVA_JUNIT_JAR_PATH) checksum mismatch, expected=\"$(JAVA_JUNIT_SHA256)\" actual=\"$$JAVA_JUNIT_SHA256_ACTUAL\"; \ + exit 1; \ + fi +endif + +$(JAVA_HAMCREST_JAR_PATH): $(JAVA_TEST_LIBDIR) +ifneq (,$(wildcard $(MVN_LOCAL)/org/hamcrest/hamcrest/$(JAVA_HAMCREST_VER)/$(JAVA_HAMCREST_JAR))) + cp -v 
$(MVN_LOCAL)/org/hamcrest/hamcrest/$(JAVA_HAMCREST_VER)/$(JAVA_HAMCREST_JAR) $(JAVA_TEST_LIBDIR) +else + curl --fail --insecure --output $(JAVA_HAMCREST_JAR_PATH) --location $(DEPS_URL)/$(JAVA_HAMCREST_JAR) + JAVA_HAMCREST_SHA256_ACTUAL=`$(SHA256_CMD) $(JAVA_HAMCREST_JAR_PATH) | cut -d ' ' -f 1`; \ + if [ "$(JAVA_HAMCREST_SHA256)" != "$$JAVA_HAMCREST_SHA256_ACTUAL" ]; then \ + echo $(JAVA_HAMCREST_JAR_PATH) checksum mismatch, expected=\"$(JAVA_HAMCREST_SHA256)\" actual=\"$$JAVA_HAMCREST_SHA256_ACTUAL\"; \ + exit 1; \ + fi +endif + +$(JAVA_MOCKITO_JAR_PATH): $(JAVA_TEST_LIBDIR) +ifneq (,$(wildcard $(MVN_LOCAL)/org/mockito/mockito-all/$(JAVA_MOCKITO_VER)/$(JAVA_MOCKITO_JAR))) + cp -v $(MVN_LOCAL)/org/mockito/mockito-all/$(JAVA_MOCKITO_VER)/$(JAVA_MOCKITO_JAR) $(JAVA_TEST_LIBDIR) +else + curl --fail --insecure --output "$(JAVA_MOCKITO_JAR_PATH)" --location $(DEPS_URL)/$(JAVA_MOCKITO_JAR) + JAVA_MOCKITO_SHA256_ACTUAL=`$(SHA256_CMD) $(JAVA_MOCKITO_JAR_PATH) | cut -d ' ' -f 1`; \ + if [ "$(JAVA_MOCKITO_SHA256)" != "$$JAVA_MOCKITO_SHA256_ACTUAL" ]; then \ + echo $(JAVA_MOCKITO_JAR_PATH) checksum mismatch, expected=\"$(JAVA_MOCKITO_SHA256)\" actual=\"$$JAVA_MOCKITO_SHA256_ACTUAL\"; \ + exit 1; \ + fi +endif + +$(JAVA_CGLIB_JAR_PATH): $(JAVA_TEST_LIBDIR) +ifneq (,$(wildcard $(MVN_LOCAL)/cglib/cglib/$(JAVA_CGLIB_VER)/$(JAVA_CGLIB_JAR))) + cp -v $(MVN_LOCAL)/cglib/cglib/$(JAVA_CGLIB_VER)/$(JAVA_CGLIB_JAR) $(JAVA_TEST_LIBDIR) +else + curl --fail --insecure --output "$(JAVA_CGLIB_JAR_PATH)" --location $(DEPS_URL)/$(JAVA_CGLIB_JAR) + JAVA_CGLIB_SHA256_ACTUAL=`$(SHA256_CMD) $(JAVA_CGLIB_JAR_PATH) | cut -d ' ' -f 1`; \ + if [ "$(JAVA_CGLIB_SHA256)" != "$$JAVA_CGLIB_SHA256_ACTUAL" ]; then \ + echo $(JAVA_CGLIB_JAR_PATH) checksum mismatch, expected=\"$(JAVA_CGLIB_SHA256)\" actual=\"$$JAVA_CGLIB_SHA256_ACTUAL\"; \ + exit 1; \ + fi +endif + +$(JAVA_ASSERTJ_JAR_PATH): $(JAVA_TEST_LIBDIR) +ifneq (,$(wildcard $(MVN_LOCAL)/org/assertj/assertj-core/$(JAVA_ASSERTJ_VER)/$(JAVA_ASSERTJ_JAR))) + cp -v $(MVN_LOCAL)/org/assertj/assertj-core/$(JAVA_ASSERTJ_VER)/$(JAVA_ASSERTJ_JAR) $(JAVA_TEST_LIBDIR) +else + curl --fail --insecure --output "$(JAVA_ASSERTJ_JAR_PATH)" --location $(DEPS_URL)/$(JAVA_ASSERTJ_JAR) + JAVA_ASSERTJ_SHA256_ACTUAL=`$(SHA256_CMD) $(JAVA_ASSERTJ_JAR_PATH) | cut -d ' ' -f 1`; \ + if [ "$(JAVA_ASSERTJ_SHA256)" != "$$JAVA_ASSERTJ_SHA256_ACTUAL" ]; then \ + echo $(JAVA_ASSERTJ_JAR_PATH) checksum mismatch, expected=\"$(JAVA_ASSERTJ_SHA256)\" actual=\"$$JAVA_ASSERTJ_SHA256_ACTUAL\"; \ + exit 1; \ + fi +endif + +resolve_test_deps: $(JAVA_JUNIT_JAR_PATH) $(JAVA_HAMCREST_JAR_PATH) $(JAVA_MOCKITO_JAR_PATH) $(JAVA_CGLIB_JAR_PATH) $(JAVA_ASSERTJ_JAR_PATH) + +java_test: java resolve_test_deps + $(AM_V_GEN)mkdir -p $(TEST_CLASSES) + $(AM_V_at) $(JAVAC_CMD) $(JAVAC_ARGS) -cp $(MAIN_CLASSES):$(JAVA_TESTCLASSPATH) -h $(NATIVE_INCLUDE) -d $(TEST_CLASSES)\ + $(TEST_SOURCES) + +test: java java_test + $(MAKE) run_test + +run_test: + $(JAVA_CMD) $(JAVA_ARGS) -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(ALL_JAVA_TESTS) + +run_plugin_test: + $(JAVA_CMD) $(JAVA_ARGS) -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(ROCKSDB_PLUGIN_JAVA_TESTS) + +db_bench: java + $(AM_V_GEN)mkdir -p $(BENCHMARK_MAIN_CLASSES) + $(AM_V_at)$(JAVAC_CMD) $(JAVAC_ARGS) -cp $(MAIN_CLASSES) -d $(BENCHMARK_MAIN_CLASSES) $(BENCHMARK_MAIN_SRC)/org/rocksdb/benchmark/*.java diff --git 
a/src/rocksdb/java/RELEASE.md b/src/rocksdb/java/RELEASE.md new file mode 100644 index 000000000..dda19455f --- /dev/null +++ b/src/rocksdb/java/RELEASE.md @@ -0,0 +1,59 @@ +## Cross-building + +RocksDB can be built as a single self-contained cross-platform JAR. The cross-platform jar can be used on any 64-bit OSX system, 32-bit Linux system, or 64-bit Linux system. + +Building a cross-platform JAR requires: + + * [Docker](https://www.docker.com/docker-community) + * A Mac OSX machine that can compile RocksDB. + * Java 7 set as JAVA_HOME. + +Once you have these items, run this make command from RocksDB's root source directory: + + make jclean clean rocksdbjavastaticreleasedocker + +This command will build RocksDB natively on OSX, and will then spin up Docker containers to build RocksDB for 32-bit and 64-bit Linux with glibc, and 32-bit and 64-bit Linux with musl libc. + +You can find all native binaries and JARs in the java/target directory upon completion: + + librocksdbjni-linux32.so + librocksdbjni-linux64.so + librocksdbjni-linux64-musl.so + librocksdbjni-linux32-musl.so + librocksdbjni-osx.jnilib + rocksdbjni-x.y.z-javadoc.jar + rocksdbjni-x.y.z-linux32.jar + rocksdbjni-x.y.z-linux64.jar + rocksdbjni-x.y.z-linux64-musl.jar + rocksdbjni-x.y.z-linux32-musl.jar + rocksdbjni-x.y.z-osx.jar + rocksdbjni-x.y.z-sources.jar + rocksdbjni-x.y.z.jar + +Where x.y.z is the built version number of RocksDB. + +## Maven publication + +Set ~/.m2/settings.xml to contain: + + <settings> + <servers> + <server> + <id>sonatype-nexus-staging</id> + <username>your-sonatype-jira-username</username> + <password>your-sonatype-jira-password</password> + </server> + </servers> + </settings> + +From RocksDB's root directory, first build the Java static JARs: + + make jclean clean rocksdbjavastaticpublish + +This command will [stage the JAR artifacts on the Sonatype staging repository](http://central.sonatype.org/pages/manual-staging-bundle-creation-and-deployment.html). To release the staged artifacts: + +1. Go to [https://oss.sonatype.org/#stagingRepositories](https://oss.sonatype.org/#stagingRepositories) and search for "rocksdb" in the upper right hand search box. +2. Select the rocksdb staging repository, and inspect its contents. +3. If all is well, follow [these steps](https://oss.sonatype.org/#stagingRepositories) to close the repository and release it. + +After the release has occurred, the artifacts will be synced to Maven Central within 24-48 hours.
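+
+## Smoke testing the JAR (optional)
+
+A minimal smoke test for a freshly built JAR (an illustrative sketch: it
+assumes the JAR is on the classpath, and the class name and database path
+are arbitrary examples):
+
+    import org.rocksdb.*;
+
+    public class SmokeTest {
+      public static void main(final String[] args) throws RocksDBException {
+        // Load the bundled native library before any other RocksDB call.
+        RocksDB.loadLibrary();
+        try (final Options options = new Options().setCreateIfMissing(true);
+             final RocksDB db = RocksDB.open(options, "/tmp/rocksdbjni-smoke")) {
+          // A single put proves the JNI layer is wired up end-to-end.
+          db.put("key".getBytes(), "value".getBytes());
+        }
+      }
+    }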
diff --git a/src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java new file mode 100644 index 000000000..070f0fe75 --- /dev/null +++ b/src/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java @@ -0,0 +1,1640 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +/** + * Copyright (C) 2011 the original author or authors. + * See the notice.md file distributed with this work for additional + * information regarding copyright ownership. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.rocksdb.benchmark; + +import java.io.IOException; +import java.lang.Runnable; +import java.lang.Math; +import java.io.File; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.util.Collection; +import java.util.Date; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import org.rocksdb.*; +import org.rocksdb.RocksMemEnv; +import org.rocksdb.util.SizeUnit; + +class Stats { + int id_; + long start_; + long finish_; + double seconds_; + long done_; + long found_; + long lastOpTime_; + long nextReport_; + long bytes_; + StringBuilder message_; + boolean excludeFromMerge_; + + // TODO(yhchiang): use the following arguments: + // (Long)Flag.stats_interval + // (Integer)Flag.stats_per_interval + + Stats(int id) { + id_ = id; + nextReport_ = 100; + done_ = 0; + bytes_ = 0; + seconds_ = 0; + start_ = System.nanoTime(); + lastOpTime_ = start_; + finish_ = start_; + found_ = 0; + message_ = new StringBuilder(""); + excludeFromMerge_ = false; + } + + void merge(final Stats other) { + if (other.excludeFromMerge_) { + return; + } + + done_ += other.done_; + found_ += other.found_; + bytes_ += other.bytes_; + seconds_ += other.seconds_; + if (other.start_ < start_) start_ = other.start_; + if (other.finish_ > finish_) finish_ = other.finish_; + + // Just keep the messages from one thread + if (message_.length() == 0) { + message_ = other.message_; + } + } + + void stop() { + finish_ = System.nanoTime(); + seconds_ = (double) (finish_ - start_) * 1e-9; + } + + void addMessage(String msg) { + if (message_.length() > 0) { + message_.append(" "); + } + message_.append(msg); + } + + void setId(int id) { id_ = id; } + void setExcludeFromMerge() { excludeFromMerge_ = true; } + + void finishedSingleOp(int bytes) { + done_++; + lastOpTime_ = System.nanoTime(); + bytes_ += bytes; + if (done_ >= nextReport_) { + if (nextReport_ < 1000) { + nextReport_ += 100; + } else if (nextReport_ < 5000) { + nextReport_ += 500; + } else if (nextReport_ < 10000) { + nextReport_ += 1000; + } else if (nextReport_ < 50000) { + nextReport_ += 5000; + } else if (nextReport_ < 100000) { + nextReport_ += 10000; + } else if (nextReport_ < 500000) { + nextReport_ += 50000; + } else { + nextReport_ += 100000; + } + System.err.printf("... Task %s finished %d ops%30s\r", id_, done_, ""); + } + } + + void report(String name) { + // Pretend at least one op was done in case we are running a benchmark + // that does not call FinishedSingleOp(). + if (done_ < 1) done_ = 1; + + StringBuilder extra = new StringBuilder(""); + if (bytes_ > 0) { + // Rate is computed on actual elapsed time, not the sum of per-thread + // elapsed times. 
+ double elapsed = (finish_ - start_) * 1e-9; + extra.append(String.format("%6.1f MB/s", (bytes_ / 1048576.0) / elapsed)); + } + extra.append(message_.toString()); + double elapsed = (finish_ - start_); + double throughput = (double) done_ / (elapsed * 1e-9); + + System.out.format("%-12s : %11.3f micros/op %d ops/sec;%s%s\n", + name, (elapsed * 1e-6) / done_, + (long) throughput, (extra.length() == 0 ? "" : " "), extra.toString()); + } +} + +public class DbBenchmark { + enum Order { + SEQUENTIAL, + RANDOM + } + + enum DBState { + FRESH, + EXISTING + } + + static { + RocksDB.loadLibrary(); + } + + abstract class BenchmarkTask implements Callable<Stats> { + // TODO(yhchiang): use (Integer)Flag.perf_level. + public BenchmarkTask( + int tid, long randSeed, long numEntries, long keyRange) { + tid_ = tid; + rand_ = new Random(randSeed + tid * 1000); + numEntries_ = numEntries; + keyRange_ = keyRange; + stats_ = new Stats(tid); + } + + @Override public Stats call() throws RocksDBException { + stats_.start_ = System.nanoTime(); + runTask(); + stats_.finish_ = System.nanoTime(); + return stats_; + } + + abstract protected void runTask() throws RocksDBException; + + protected int tid_; + protected Random rand_; + protected long numEntries_; + protected long keyRange_; + protected Stats stats_; + + protected void getFixedKey(byte[] key, long sn) { + generateKeyFromLong(key, sn); + } + + protected void getRandomKey(byte[] key, long range) { + generateKeyFromLong(key, Math.abs(rand_.nextLong() % range)); + } + } + + abstract class WriteTask extends BenchmarkTask { + public WriteTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange); + writeOpt_ = writeOpt; + entriesPerBatch_ = entriesPerBatch; + maxWritesPerSecond_ = -1; + } + + public WriteTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange); + writeOpt_ = writeOpt; + entriesPerBatch_ = entriesPerBatch; + maxWritesPerSecond_ = maxWritesPerSecond; + } + + @Override public void runTask() throws RocksDBException { + if (numEntries_ != DbBenchmark.this.num_) { + stats_.message_.append(String.format(" (%d ops)", numEntries_)); + } + byte[] key = new byte[keySize_]; + byte[] value = new byte[valueSize_]; + + try { + if (entriesPerBatch_ == 1) { + for (long i = 0; i < numEntries_; ++i) { + getKey(key, i, keyRange_); + DbBenchmark.this.gen_.generate(value); + db_.put(writeOpt_, key, value); + stats_.finishedSingleOp(keySize_ + valueSize_); + writeRateControl(i); + if (isFinished()) { + return; + } + } + } else { + for (long i = 0; i < numEntries_; i += entriesPerBatch_) { + WriteBatch batch = new WriteBatch(); + for (long j = 0; j < entriesPerBatch_; j++) { + getKey(key, i + j, keyRange_); + DbBenchmark.this.gen_.generate(value); + batch.put(key, value); + stats_.finishedSingleOp(keySize_ + valueSize_); + } + db_.write(writeOpt_, batch); + batch.dispose(); + writeRateControl(i); + if (isFinished()) { + return; + } + } + } + } catch (InterruptedException e) { + // thread has been terminated. 
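+        // Nothing to recover here: the only interruptible call in this task
+        // is the sleep inside writeRateControl(), so an interrupt simply
+        // ends the write task early.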
+ } + } + + protected void writeRateControl(long writeCount) + throws InterruptedException { + if (maxWritesPerSecond_ <= 0) return; + long minInterval = + writeCount * TimeUnit.SECONDS.toNanos(1) / maxWritesPerSecond_; + long interval = System.nanoTime() - stats_.start_; + if (minInterval - interval > TimeUnit.MILLISECONDS.toNanos(1)) { + TimeUnit.NANOSECONDS.sleep(minInterval - interval); + } + } + + abstract protected void getKey(byte[] key, long id, long range); + protected WriteOptions writeOpt_; + protected long entriesPerBatch_; + protected long maxWritesPerSecond_; + } + + class WriteSequentialTask extends WriteTask { + public WriteSequentialTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch); + } + public WriteSequentialTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, + long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch, + maxWritesPerSecond); + } + @Override protected void getKey(byte[] key, long id, long range) { + getFixedKey(key, id); + } + } + + class WriteRandomTask extends WriteTask { + public WriteRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch); + } + public WriteRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, + long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch, + maxWritesPerSecond); + } + @Override protected void getKey(byte[] key, long id, long range) { + getRandomKey(key, range); + } + } + + class WriteUniqueRandomTask extends WriteTask { + static final int MAX_BUFFER_SIZE = 10000000; + public WriteUniqueRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch); + initRandomKeySequence(); + } + public WriteUniqueRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, + long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch, + maxWritesPerSecond); + initRandomKeySequence(); + } + @Override protected void getKey(byte[] key, long id, long range) { + generateKeyFromLong(key, nextUniqueRandom()); + } + + protected void initRandomKeySequence() { + bufferSize_ = MAX_BUFFER_SIZE; + if (bufferSize_ > keyRange_) { + bufferSize_ = (int) keyRange_; + } + currentKeyCount_ = bufferSize_; + keyBuffer_ = new long[MAX_BUFFER_SIZE]; + for (int k = 0; k < bufferSize_; ++k) { + keyBuffer_[k] = k; + } + } + + /** + * Semi-randomly return the next unique key. It is guaranteed to be + * fully random if keyRange_ <= MAX_BUFFER_SIZE. + */ + long nextUniqueRandom() { + if (bufferSize_ == 0) { + System.err.println("bufferSize_ == 0."); + return 0; + } + int r = rand_.nextInt(bufferSize_); + // randomly pick one from the keyBuffer + long randKey = keyBuffer_[r]; + if (currentKeyCount_ < keyRange_) { + // if we have not yet inserted all keys, insert next new key to [r]. + keyBuffer_[r] = currentKeyCount_++; + } else { + // move the last element to [r] and decrease the size by 1. 
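+        // Once every key has entered the buffer, this amounts to sampling
+        // without replacement: the returned key is overwritten by the last
+        // live element, so it can never be drawn again.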
keyBuffer_[r] = keyBuffer_[--bufferSize_]; + } + return randKey; + } + + int bufferSize_; + long currentKeyCount_; + long[] keyBuffer_; + } + + class ReadRandomTask extends BenchmarkTask { + public ReadRandomTask( + int tid, long randSeed, long numEntries, long keyRange) { + super(tid, randSeed, numEntries, keyRange); + } + @Override public void runTask() throws RocksDBException { + byte[] key = new byte[keySize_]; + byte[] value = new byte[valueSize_]; + for (long i = 0; i < numEntries_; i++) { + getRandomKey(key, keyRange_); + int len = db_.get(key, value); + if (len != RocksDB.NOT_FOUND) { + stats_.found_++; + stats_.finishedSingleOp(keySize_ + valueSize_); + } else { + stats_.finishedSingleOp(keySize_); + } + if (isFinished()) { + return; + } + } + } + } + + class ReadSequentialTask extends BenchmarkTask { + public ReadSequentialTask( + int tid, long randSeed, long numEntries, long keyRange) { + super(tid, randSeed, numEntries, keyRange); + } + @Override public void runTask() throws RocksDBException { + RocksIterator iter = db_.newIterator(); + long i; + for (iter.seekToFirst(), i = 0; + iter.isValid() && i < numEntries_; + iter.next(), ++i) { + stats_.found_++; + stats_.finishedSingleOp(iter.key().length + iter.value().length); + if (isFinished()) { + iter.dispose(); + return; + } + } + iter.dispose(); + } + } + + public DbBenchmark(Map<Flag, Object> flags) throws Exception { + benchmarks_ = (List<String>) flags.get(Flag.benchmarks); + num_ = (Integer) flags.get(Flag.num); + threadNum_ = (Integer) flags.get(Flag.threads); + reads_ = (Integer) (flags.get(Flag.reads) == null ? + flags.get(Flag.num) : flags.get(Flag.reads)); + keySize_ = (Integer) flags.get(Flag.key_size); + valueSize_ = (Integer) flags.get(Flag.value_size); + compressionRatio_ = (Double) flags.get(Flag.compression_ratio); + useExisting_ = (Boolean) flags.get(Flag.use_existing_db); + randSeed_ = (Long) flags.get(Flag.seed); + databaseDir_ = (String) flags.get(Flag.db); + writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second); + memtable_ = (String) flags.get(Flag.memtablerep); + maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number); + prefixSize_ = (Integer) flags.get(Flag.prefix_size); + keysPerPrefix_ = (Integer) flags.get(Flag.keys_per_prefix); + hashBucketCount_ = (Long) flags.get(Flag.hash_bucket_count); + usePlainTable_ = (Boolean) flags.get(Flag.use_plain_table); + useMemenv_ = (Boolean) flags.get(Flag.use_mem_env); + flags_ = flags; + finishLock_ = new Object(); + // options.setPrefixSize((Integer)flags_.get(Flag.prefix_size)); + // options.setKeysPerPrefix((Long)flags_.get(Flag.keys_per_prefix)); + compressionType_ = (String) flags.get(Flag.compression_type); + compression_ = CompressionType.NO_COMPRESSION; + try { + if (compressionType_ != null) { + final CompressionType compressionType = + CompressionType.getCompressionType(compressionType_); + if (compressionType != null && + compressionType != CompressionType.NO_COMPRESSION) { + System.loadLibrary(compressionType.getLibraryName()); + } + + } + } catch (UnsatisfiedLinkError e) { + System.err.format("Unable to load %s library:%s%n" + + "No compression will be used.%n", + compressionType_, e.toString()); + compressionType_ = "none"; + } + gen_ = new RandomGenerator(randSeed_, compressionRatio_); + } + + private void prepareReadOptions(ReadOptions options) { + options.setVerifyChecksums((Boolean)flags_.get(Flag.verify_checksum)); + options.setTailing((Boolean)flags_.get(Flag.use_tailing_iterator)); + } + 
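+  // Applies the write-related command-line flags (sync, disable_wal) to the
+  // WriteOptions instance used by the write benchmarks.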
{ + options.setSync((Boolean)flags_.get(Flag.sync)); + options.setDisableWAL((Boolean)flags_.get(Flag.disable_wal)); + } + + private void prepareOptions(Options options) throws RocksDBException { + if (!useExisting_) { + options.setCreateIfMissing(true); + } else { + options.setCreateIfMissing(false); + } + if (useMemenv_) { + options.setEnv(new RocksMemEnv(Env.getDefault())); + } + switch (memtable_) { + case "skip_list": + options.setMemTableConfig(new SkipListMemTableConfig()); + break; + case "vector": + options.setMemTableConfig(new VectorMemTableConfig()); + break; + case "hash_linkedlist": + options.setMemTableConfig( + new HashLinkedListMemTableConfig() + .setBucketCount(hashBucketCount_)); + options.useFixedLengthPrefixExtractor(prefixSize_); + break; + case "hash_skiplist": + case "prefix_hash": + options.setMemTableConfig( + new HashSkipListMemTableConfig() + .setBucketCount(hashBucketCount_)); + options.useFixedLengthPrefixExtractor(prefixSize_); + break; + default: + System.err.format( + "unable to detect the specified memtable, " + + "use the default memtable factory %s%n", + options.memTableFactoryName()); + break; + } + if (usePlainTable_) { + options.setTableFormatConfig( + new PlainTableConfig().setKeySize(keySize_)); + } else { + BlockBasedTableConfig table_options = new BlockBasedTableConfig(); + table_options.setBlockSize((Long)flags_.get(Flag.block_size)) + .setBlockCacheSize((Long)flags_.get(Flag.cache_size)) + .setCacheNumShardBits( + (Integer)flags_.get(Flag.cache_numshardbits)); + options.setTableFormatConfig(table_options); + } + options.setWriteBufferSize( + (Long)flags_.get(Flag.write_buffer_size)); + options.setMaxWriteBufferNumber( + (Integer)flags_.get(Flag.max_write_buffer_number)); + options.setMaxBackgroundCompactions( + (Integer)flags_.get(Flag.max_background_compactions)); + options.getEnv().setBackgroundThreads( + (Integer)flags_.get(Flag.max_background_compactions)); + options.setMaxBackgroundFlushes( + (Integer)flags_.get(Flag.max_background_flushes)); + options.setMaxBackgroundJobs((Integer) flags_.get(Flag.max_background_jobs)); + options.setMaxOpenFiles( + (Integer)flags_.get(Flag.open_files)); + options.setUseFsync( + (Boolean)flags_.get(Flag.use_fsync)); + options.setWalDir( + (String)flags_.get(Flag.wal_dir)); + options.setDeleteObsoleteFilesPeriodMicros( + (Integer)flags_.get(Flag.delete_obsolete_files_period_micros)); + options.setTableCacheNumshardbits( + (Integer)flags_.get(Flag.table_cache_numshardbits)); + options.setAllowMmapReads( + (Boolean)flags_.get(Flag.mmap_read)); + options.setAllowMmapWrites( + (Boolean)flags_.get(Flag.mmap_write)); + options.setAdviseRandomOnOpen( + (Boolean)flags_.get(Flag.advise_random_on_open)); + options.setUseAdaptiveMutex( + (Boolean)flags_.get(Flag.use_adaptive_mutex)); + options.setBytesPerSync( + (Long)flags_.get(Flag.bytes_per_sync)); + options.setBloomLocality( + (Integer)flags_.get(Flag.bloom_locality)); + options.setMinWriteBufferNumberToMerge( + (Integer)flags_.get(Flag.min_write_buffer_number_to_merge)); + options.setMemtablePrefixBloomSizeRatio((Double) flags_.get(Flag.memtable_bloom_size_ratio)); + options.setMemtableWholeKeyFiltering((Boolean) flags_.get(Flag.memtable_whole_key_filtering)); + options.setNumLevels( + (Integer)flags_.get(Flag.num_levels)); + options.setTargetFileSizeBase( + (Integer)flags_.get(Flag.target_file_size_base)); + options.setTargetFileSizeMultiplier((Integer)flags_.get(Flag.target_file_size_multiplier)); + options.setMaxBytesForLevelBase( + 
(Integer)flags_.get(Flag.max_bytes_for_level_base));
+    options.setMaxBytesForLevelMultiplier((Double) flags_.get(Flag.max_bytes_for_level_multiplier));
+    options.setLevelZeroStopWritesTrigger(
+        (Integer)flags_.get(Flag.level0_stop_writes_trigger));
+    options.setLevelZeroSlowdownWritesTrigger(
+        (Integer)flags_.get(Flag.level0_slowdown_writes_trigger));
+    options.setLevelZeroFileNumCompactionTrigger(
+        (Integer)flags_.get(Flag.level0_file_num_compaction_trigger));
+    options.setMaxCompactionBytes(
+        (Long) flags_.get(Flag.max_compaction_bytes));
+    options.setDisableAutoCompactions(
+        (Boolean)flags_.get(Flag.disable_auto_compactions));
+    options.setMaxSuccessiveMerges(
+        (Integer)flags_.get(Flag.max_successive_merges));
+    options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));
+    options.setWalSizeLimitMB((Long)flags_.get(Flag.wal_size_limit_MB));
+    if (flags_.get(Flag.java_comparator) != null) {
+      options.setComparator(
+          (AbstractComparator)flags_.get(Flag.java_comparator));
+    }
+
+    /* TODO(yhchiang): enable the following parameters
+    options.setCompressionType((String)flags_.get(Flag.compression_type));
+    options.setCompressionLevel((Integer)flags_.get(Flag.compression_level));
+    options.setMinLevelToCompress((Integer)flags_.get(Flag.min_level_to_compress));
+    options.setStatistics((Boolean)flags_.get(Flag.statistics));
+    options.setUniversalSizeRatio(
+        (Integer)flags_.get(Flag.universal_size_ratio));
+    options.setUniversalMinMergeWidth(
+        (Integer)flags_.get(Flag.universal_min_merge_width));
+    options.setUniversalMaxMergeWidth(
+        (Integer)flags_.get(Flag.universal_max_merge_width));
+    options.setUniversalMaxSizeAmplificationPercent(
+        (Integer)flags_.get(Flag.universal_max_size_amplification_percent));
+    options.setUniversalCompressionSizePercent(
+        (Integer)flags_.get(Flag.universal_compression_size_percent));
+    // TODO(yhchiang): add RocksDB.openForReadOnly() to enable Flag.readonly
+    // TODO(yhchiang): enable Flag.merge_operator by switch
+    options.setAccessHintOnCompactionStart(
+        (String)flags_.get(Flag.compaction_fadvice));
+    // available values of fadvice are "NONE", "NORMAL", "SEQUENTIAL", "WILLNEED" for fadvice
+    */
+  }
+
+  private void run() throws RocksDBException {
+    if (!useExisting_) {
+      destroyDb();
+    }
+    Options options = new Options();
+    prepareOptions(options);
+    open(options);
+
+    printHeader(options);
+
+    for (String benchmark : benchmarks_) {
+      List<Callable<Stats>> tasks = new ArrayList<Callable<Stats>>();
+      List<Callable<Stats>> bgTasks = new ArrayList<Callable<Stats>>();
+      WriteOptions writeOpt = new WriteOptions();
+      prepareWriteOptions(writeOpt);
+      ReadOptions readOpt = new ReadOptions();
+      prepareReadOptions(readOpt);
+      int currentTaskId = 0;
+      boolean known = true;
+
+      switch (benchmark) {
+        case "fillseq":
+          tasks.add(new WriteSequentialTask(
+              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+          break;
+        case "fillbatch":
+          tasks.add(
+              new WriteSequentialTask(currentTaskId++, randSeed_, num_, num_, writeOpt, 1000));
+          break;
+        case "fillrandom":
+          tasks.add(new WriteRandomTask(
+              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+          break;
+        case "filluniquerandom":
+          tasks.add(new WriteUniqueRandomTask(
+              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+          break;
+        case "fillsync":
+          writeOpt.setSync(true);
+          tasks.add(new WriteRandomTask(
+              currentTaskId++, randSeed_, num_ / 1000, num_ / 1000,
+              writeOpt, 1));
+          break;
+        case "readseq":
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadSequentialTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+          }
+          break;
+        case "readrandom":
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadRandomTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+          }
+          break;
+        case "readwhilewriting":
+          WriteTask writeTask = new WriteRandomTask(
+              -1, randSeed_, Long.MAX_VALUE, num_, writeOpt, 1, writesPerSeconds_);
+          writeTask.stats_.setExcludeFromMerge();
+          bgTasks.add(writeTask);
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadRandomTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+          }
+          break;
+        case "readhot":
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadRandomTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_ / 100));
+          }
+          break;
+        case "delete":
+          destroyDb();
+          open(options);
+          break;
+        default:
+          known = false;
+          System.err.println("Unknown benchmark: " + benchmark);
+          break;
+      }
+      if (known) {
+        ExecutorService executor = Executors.newCachedThreadPool();
+        ExecutorService bgExecutor = Executors.newCachedThreadPool();
+        try {
+          // measure only the main executor time
+          List<Future<Stats>> bgResults = new ArrayList<Future<Stats>>();
+          for (Callable<Stats> bgTask : bgTasks) {
+            bgResults.add(bgExecutor.submit(bgTask));
+          }
+          start();
+          List<Future<Stats>> results = executor.invokeAll(tasks);
+          executor.shutdown();
+          boolean finished = executor.awaitTermination(10, TimeUnit.SECONDS);
+          if (!finished) {
+            System.out.format(
+                "Benchmark %s was not finished before timeout.",
+                benchmark);
+            executor.shutdownNow();
+          }
+          setFinished(true);
+          bgExecutor.shutdown();
+          finished = bgExecutor.awaitTermination(10, TimeUnit.SECONDS);
+          if (!finished) {
+            System.out.format(
+                "Benchmark %s was not finished before timeout.",
+                benchmark);
+            bgExecutor.shutdownNow();
+          }
+
+          stop(benchmark, results, currentTaskId);
+        } catch (InterruptedException e) {
+          System.err.println(e);
+        }
+      }
+      writeOpt.dispose();
+      readOpt.dispose();
+    }
+    options.dispose();
+    db_.close();
+  }
+
+  private void printHeader(Options options) {
+    int kKeySize = 16;
+    System.out.printf("Keys: %d bytes each\n", kKeySize);
+    System.out.printf("Values: %d bytes each (%d bytes after compression)\n",
+        valueSize_,
+        (int) (valueSize_ * compressionRatio_ + 0.5));
+    System.out.printf("Entries: %d\n", num_);
+    System.out.printf("RawSize: %.1f MB (estimated)\n",
+        ((double)(kKeySize + valueSize_) * num_) / SizeUnit.MB);
+    System.out.printf("FileSize: %.1f MB (estimated)\n",
+        (((kKeySize + valueSize_ * compressionRatio_) * num_) / SizeUnit.MB));
+    System.out.format("Memtable Factory: %s%n", options.memTableFactoryName());
+    System.out.format("Prefix: %d bytes%n", prefixSize_);
+    System.out.format("Compression: %s%n", compressionType_);
+    printWarnings();
+    System.out.printf("------------------------------------------------\n");
+  }
+
+  void printWarnings() {
+    boolean assertsEnabled = false;
+    assert assertsEnabled = true; // Intentional side effect!!!
+    if (assertsEnabled) {
+      System.out.printf(
+          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+    }
+  }
+
+  private void open(Options options) throws RocksDBException {
+    System.out.println("Using database directory: " + databaseDir_);
+    db_ = RocksDB.open(options, databaseDir_);
+  }
+
+  private void start() {
+    setFinished(false);
+    startTime_ = System.nanoTime();
+  }
+
+  private void stop(
+      String benchmark, List<Future<Stats>> results, int concurrentThreads) {
+    long endTime = System.nanoTime();
+    double elapsedSeconds =
+        1.0d * (endTime - startTime_) / TimeUnit.SECONDS.toNanos(1);
+
+    Stats stats = new Stats(-1);
+    int taskFinishedCount = 0;
+    for (Future<Stats> result : results) {
+      if (result.isDone()) {
+        try {
+          Stats taskStats = result.get(3, TimeUnit.SECONDS);
+          if (!result.isCancelled()) {
+            taskFinishedCount++;
+          }
+          stats.merge(taskStats);
+        } catch (Exception e) {
+          // the task was not successful; the output will indicate this
+        }
+      }
+    }
+    String extra = "";
+    if (benchmark.indexOf("read") >= 0) {
+      extra = String.format(" %d / %d found; ", stats.found_, stats.done_);
+    } else {
+      extra = String.format(" %d ops done; ", stats.done_);
+    }
+
+    System.out.printf(
+        "%-16s : %11.5f micros/op; %6.1f MB/s;%s %d / %d task(s) finished.\n",
+        benchmark, elapsedSeconds / stats.done_ * 1e6,
+        (stats.bytes_ / 1048576.0) / elapsedSeconds, extra,
+        taskFinishedCount, concurrentThreads);
+  }
+
+  public void generateKeyFromLong(byte[] slice, long n) {
+    assert(n >= 0);
+    int startPos = 0;
+
+    if (keysPerPrefix_ > 0) {
+      long numPrefix = (num_ + keysPerPrefix_ - 1) / keysPerPrefix_;
+      long prefix = n % numPrefix;
+      int bytesToFill = Math.min(prefixSize_, 8);
+      for (int i = 0; i < bytesToFill; ++i) {
+        slice[i] = (byte) (prefix % 256);
+        prefix /= 256;
+      }
+      for (int i = 8; i < bytesToFill; ++i) {
+        slice[i] = '0';
+      }
+      startPos = bytesToFill;
+    }
+
+    for (int i = slice.length - 1; i >= startPos; --i) {
+      slice[i] = (byte) ('0' + (n % 10));
+      n /= 10;
+    }
+  }
+
+  private void destroyDb() {
+    if (db_ != null) {
+      db_.close();
+    }
+    // TODO(yhchiang): develop our own FileUtil
+    // FileUtil.deleteDir(databaseDir_);
+  }
+
+  private void printStats() {
+  }
+
+  static void printHelp() {
+    System.out.println("usage:");
+    for (Flag flag : Flag.values()) {
+      System.out.format(" --%s%n\t%s%n",
+          flag.name(),
+          flag.desc());
+      if (flag.getDefaultValue() != null) {
+        System.out.format("\tDEFAULT: %s%n",
+            flag.getDefaultValue().toString());
+      }
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    Map<Flag, Object> flags = new EnumMap<Flag, Object>(Flag.class);
+    for (Flag flag : Flag.values()) {
+      if (flag.getDefaultValue() != null) {
+        flags.put(flag, flag.getDefaultValue());
+      }
+    }
+    for (String arg : args) {
+      boolean valid = false;
+      if (arg.equals("--help") || arg.equals("-h")) {
+        printHelp();
+        System.exit(0);
+      }
+      if (arg.startsWith("--")) {
+        try {
+          String[] parts = arg.substring(2).split("=");
+          if (parts.length >= 1) {
+            Flag key = Flag.valueOf(parts[0]);
+            if (key != null) {
+              Object value = null;
+              if (parts.length >= 2) {
+                value = key.parseValue(parts[1]);
+              }
+              flags.put(key, value);
+              valid = true;
+            }
+          }
+        }
+        catch (Exception e) {
+        }
+      }
+      if (!valid) {
+        System.err.println("Invalid argument " + arg);
+        System.exit(1);
+      }
+    }
+    new DbBenchmark(flags).run();
+  }
+
+  private enum Flag {
+    benchmarks(Arrays.asList("fillseq", "readrandom", "fillrandom"),
+        "Comma-separated list of operations to run in the specified order\n" +
+        "\tActual benchmarks:\n" +
+        "\t\tfillseq -- write N values in
sequential key order in async mode.\n" + + "\t\tfillrandom -- write N values in random key order in async mode.\n" + + "\t\tfillbatch -- write N/1000 batch where each batch has 1000 values\n" + + "\t\t in sequential key order in sync mode.\n" + + "\t\tfillsync -- write N/100 values in random key order in sync mode.\n" + + "\t\tfill100K -- write N/1000 100K values in random order in async mode.\n" + + "\t\treadseq -- read N times sequentially.\n" + + "\t\treadrandom -- read N times in random order.\n" + + "\t\treadhot -- read N times in random order from 1% section of DB.\n" + + "\t\treadwhilewriting -- measure the read performance of multiple readers\n" + + "\t\t with a bg single writer. The write rate of the bg\n" + + "\t\t is capped by --writes_per_second.\n" + + "\tMeta Operations:\n" + + "\t\tdelete -- delete DB") { + @Override public Object parseValue(String value) { + return new ArrayList(Arrays.asList(value.split(","))); + } + }, + compression_ratio(0.5d, + "Arrange to generate values that shrink to this fraction of\n" + + "\ttheir original size after compression.") { + @Override public Object parseValue(String value) { + return Double.parseDouble(value); + } + }, + use_existing_db(false, + "If true, do not destroy the existing database. If you set this\n" + + "\tflag and also specify a benchmark that wants a fresh database,\n" + + "\tthat benchmark will fail.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + num(1000000, + "Number of key/values to place in database.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + threads(1, + "Number of concurrent threads to run.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + reads(null, + "Number of read operations to do. If negative, do --nums reads.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + key_size(16, + "The size of each key in bytes.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + value_size(100, + "The size of each value in bytes.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + write_buffer_size(4L * SizeUnit.MB, + "Number of bytes to buffer in memtable before compacting\n" + + "\t(initialized to default value by 'main'.)") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + max_write_buffer_number(2, + "The number of in-memory memtables. Each memtable is of size\n" + + "\twrite_buffer_size.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + prefix_size(0, "Controls the prefix size for HashSkipList, HashLinkedList,\n" + + "\tand plain table.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + keys_per_prefix(0, "Controls the average number of keys generated\n" + + "\tper prefix, 0 means no special handling of the prefix,\n" + + "\ti.e. use the prefix comes with the generated random number.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + memtablerep("skip_list", + "The memtable format. 
Available options are\n" + + "\tskip_list,\n" + + "\tvector,\n" + + "\thash_linkedlist,\n" + + "\thash_skiplist (prefix_hash.)") { + @Override public Object parseValue(String value) { + return value; + } + }, + hash_bucket_count(SizeUnit.MB, + "The number of hash buckets used in the hash-bucket-based\n" + + "\tmemtables. Memtables that currently support this argument are\n" + + "\thash_linkedlist and hash_skiplist.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + writes_per_second(10000, + "The write-rate of the background writer used in the\n" + + "\t`readwhilewriting` benchmark. Non-positive number indicates\n" + + "\tusing an unbounded write-rate in `readwhilewriting` benchmark.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + use_plain_table(false, + "Use plain-table sst format.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + cache_size(-1L, + "Number of bytes to use as a cache of uncompressed data.\n" + + "\tNegative means use default settings.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + seed(0L, + "Seed base for random number generators.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + num_levels(7, + "The total number of levels.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + numdistinct(1000L, + "Number of distinct keys to use. Used in RandomWithVerify to\n" + + "\tread/write on fewer keys so that gets are more likely to find the\n" + + "\tkey and puts are more likely to update the same key.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + merge_keys(-1L, + "Number of distinct keys to use for MergeRandom and\n" + + "\tReadRandomMergeRandom.\n" + + "\tIf negative, there will be FLAGS_num keys.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + bloom_locality(0,"Control bloom filter probes locality.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + duration(0,"Time in seconds for the random-ops tests to run.\n" + + "\tWhen 0 then num & reads determine the test duration.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + num_multi_db(0, + "Number of DBs used in the benchmark. 0 means single DB.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + histogram(false,"Print histogram of operation timings.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + min_write_buffer_number_to_merge( + defaultOptions_.minWriteBufferNumberToMerge(), + "The minimum number of write buffers that will be merged together\n" + + "\tbefore writing to storage. This is cheap because it is an\n" + + "\tin-memory merge. If this feature is not enabled, then all these\n" + + "\twrite buffers are flushed to L0 as separate files and this\n" + + "\tincreases read amplification because a get request has to check\n" + + "\tin all of these files. 
Also, an in-memory merge may result in\n" + + "\twriting less data to storage if there are duplicate records\n" + + "\tin each of these individual write buffers.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_background_compactions( + defaultOptions_.maxBackgroundCompactions(), + "The maximum number of concurrent background compactions\n" + + "\tthat can occur in parallel.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_background_flushes( + defaultOptions_.maxBackgroundFlushes(), + "The maximum number of concurrent background flushes\n" + + "\tthat can occur in parallel.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_background_jobs(defaultOptions_.maxBackgroundJobs(), + "The maximum number of concurrent background jobs\n" + + "\tthat can occur in parallel.") { + @Override + public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + /* TODO(yhchiang): enable the following + compaction_style((int32_t) defaultOptions_.compactionStyle(), + "style of compaction: level-based vs universal.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + },*/ + universal_size_ratio(0, + "Percentage flexibility while comparing file size\n" + + "\t(for universal compaction only).") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_min_merge_width(0,"The minimum number of files in a\n" + + "\tsingle compaction run (for universal compaction only).") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_max_merge_width(0,"The max number of files to compact\n" + + "\tin universal style compaction.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_max_size_amplification_percent(0, + "The max size amplification for universal style compaction.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_compression_size_percent(-1, + "The percentage of the database to compress for universal\n" + + "\tcompaction. -1 means compress everything.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + block_size(defaultBlockBasedTableOptions_.blockSize(), + "Number of bytes in a block.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + compressed_cache_size(-1L, + "Number of bytes to use as a cache of compressed data.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + open_files(defaultOptions_.maxOpenFiles(), + "Maximum number of files to keep open at the same time\n" + + "\t(use default if == 0)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + bloom_bits(-1,"Bloom filter bits per key. 
Negative means\n" + + "\tuse default settings.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + memtable_bloom_size_ratio(0.0d, "Ratio of memtable used by the bloom filter.\n" + + "\t0 means no bloom filter.") { + @Override public Object parseValue(String value) { + return Double.parseDouble(value); + } + }, + memtable_whole_key_filtering(false, "Enable whole key bloom filter in memtable.") { + @Override + public Object parseValue(String value) { + return parseBoolean(value); + } + }, + cache_numshardbits(-1,"Number of shards for the block cache\n" + + "\tis 2 ** cache_numshardbits. Negative means use default settings.\n" + + "\tThis is applied only if FLAGS_cache_size is non-negative.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + verify_checksum(false,"Verify checksum for every block read\n" + + "\tfrom storage.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + statistics(false,"Database statistics.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + writes(-1L, "Number of write operations to do. If negative, do\n" + + "\t--num reads.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + sync(false,"Sync all writes to disk.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + use_fsync(false,"If true, issue fsync instead of fdatasync.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + disable_wal(false,"If true, do not write WAL for write.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + wal_dir("", "If not empty, use the given dir for WAL.") { + @Override public Object parseValue(String value) { + return value; + } + }, + target_file_size_base(2 * 1048576,"Target file size at level-1") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + target_file_size_multiplier(1, + "A multiplier to compute target level-N file size (N >= 2)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_bytes_for_level_base(10 * 1048576, + "Max bytes for level-1") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_bytes_for_level_multiplier(10.0d, + "A multiplier to compute max bytes for level-N (N >= 2)") { + @Override public Object parseValue(String value) { + return Double.parseDouble(value); + } + }, + level0_stop_writes_trigger(12,"Number of files in level-0\n" + + "\tthat will trigger put stop.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + level0_slowdown_writes_trigger(8,"Number of files in level-0\n" + + "\tthat will slow down writes.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + level0_file_num_compaction_trigger(4,"Number of files in level-0\n" + + "\twhen compactions start.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + readwritepercent(90,"Ratio of reads to reads/writes (expressed\n" + + "\tas percentage) for the ReadRandomWriteRandom workload. The\n" + + "\tdefault value 90 means 90% operations out of all reads and writes\n" + + "\toperations are reads. 
In other words, 9 gets for every 1 put.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + mergereadpercent(70,"Ratio of merges to merges&reads (expressed\n" + + "\tas percentage) for the ReadRandomMergeRandom workload. The\n" + + "\tdefault value 70 means 70% out of all read and merge operations\n" + + "\tare merges. In other words, 7 merges for every 3 gets.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + deletepercent(2,"Percentage of deletes out of reads/writes/\n" + + "\tdeletes (used in RandomWithVerify only). RandomWithVerify\n" + + "\tcalculates writepercent as (100 - FLAGS_readwritepercent -\n" + + "\tdeletepercent), so deletepercent must be smaller than (100 -\n" + + "\tFLAGS_readwritepercent)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + delete_obsolete_files_period_micros(0,"Option to delete\n" + + "\tobsolete files periodically. 0 means that obsolete files are\n" + + "\tdeleted after every compaction run.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + compression_type("snappy", + "Algorithm used to compress the database.") { + @Override public Object parseValue(String value) { + return value; + } + }, + compression_level(-1, + "Compression level. For zlib this should be -1 for the\n" + + "\tdefault level, or between 0 and 9.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + min_level_to_compress(-1,"If non-negative, compression starts\n" + + "\tfrom this level. Levels with number < min_level_to_compress are\n" + + "\tnot compressed. Otherwise, apply compression_type to\n" + + "\tall levels.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + table_cache_numshardbits(4,"") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + stats_interval(0L, "Stats are reported every N operations when\n" + + "\tthis is greater than zero. When 0 the interval grows over time.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + stats_per_interval(0,"Reports additional stats per interval when\n" + + "\tthis is greater than 0.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + perf_level(0,"Level of perf collection.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_compaction_bytes(0L, "Limit number of bytes in one compaction to be lower than this\n" + + "\threshold. 
But it's not guaranteed.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + readonly(false,"Run read only benchmarks.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + disable_auto_compactions(false,"Do not auto trigger compactions.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + wal_ttl_seconds(0L,"Set the TTL for the WAL Files in seconds.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + wal_size_limit_MB(0L,"Set the size limit for the WAL Files\n" + + "\tin MB.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + /* TODO(yhchiang): enable the following + direct_reads(rocksdb::EnvOptions().use_direct_reads, + "Allow direct I/O reads.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + direct_writes(rocksdb::EnvOptions().use_direct_reads, + "Allow direct I/O reads.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + */ + mmap_read(false, + "Allow reads to occur via mmap-ing files.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + mmap_write(false, + "Allow writes to occur via mmap-ing files.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + advise_random_on_open(defaultOptions_.adviseRandomOnOpen(), + "Advise random access on table file open.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + compaction_fadvice("NORMAL", + "Access pattern advice when a file is compacted.") { + @Override public Object parseValue(String value) { + return value; + } + }, + use_tailing_iterator(false, + "Use tailing iterator to access a series of keys instead of get.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + use_adaptive_mutex(defaultOptions_.useAdaptiveMutex(), + "Use adaptive mutex.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + bytes_per_sync(defaultOptions_.bytesPerSync(), + "Allows OS to incrementally sync files to disk while they are\n" + + "\tbeing written, in the background. Issue one request for every\n" + + "\tbytes_per_sync written. 0 turns it off.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + filter_deletes(false," On true, deletes use bloom-filter and drop\n" + + "\tthe delete if key not present.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + max_successive_merges(0,"Maximum number of successive merge\n" + + "\toperations on a key in the memtable.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + db(getTempDir("rocksdb-jni"), + "Use the db with the following name.") { + @Override public Object parseValue(String value) { + return value; + } + }, + use_mem_env(false, "Use RocksMemEnv instead of default filesystem based\n" + + "environment.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + java_comparator(null, "Class name of a Java Comparator to use instead\n" + + "\tof the default C++ ByteWiseComparatorImpl. 
Must be available on\n" +
+        "\tthe classpath") {
+      @Override
+      protected Object parseValue(final String value) {
+        try {
+          final ComparatorOptions copt = new ComparatorOptions();
+          final Class<AbstractComparator> clsComparator =
+              (Class<AbstractComparator>) Class.forName(value);
+          final Constructor<AbstractComparator> cstr =
+              clsComparator.getConstructor(ComparatorOptions.class);
+          return cstr.newInstance(copt);
+        } catch(final ClassNotFoundException cnfe) {
+          throw new IllegalArgumentException("Java Comparator '" + value + "'" +
+              " not found on the classpath", cnfe);
+        } catch(final NoSuchMethodException nsme) {
+          throw new IllegalArgumentException("Java Comparator '" + value + "'" +
+              " does not have a public ComparatorOptions constructor", nsme);
+        } catch(final IllegalAccessException | InstantiationException
+            | InvocationTargetException ie) {
+          throw new IllegalArgumentException("Unable to construct Java" +
+              " Comparator '" + value + "'", ie);
+        }
+      }
+    };
+
+    private Flag(Object defaultValue, String desc) {
+      defaultValue_ = defaultValue;
+      desc_ = desc;
+    }
+
+    public Object getDefaultValue() {
+      return defaultValue_;
+    }
+
+    public String desc() {
+      return desc_;
+    }
+
+    public boolean parseBoolean(String value) {
+      if (value.equals("1")) {
+        return true;
+      } else if (value.equals("0")) {
+        return false;
+      }
+      return Boolean.parseBoolean(value);
+    }
+
+    protected abstract Object parseValue(String value);
+
+    private final Object defaultValue_;
+    private final String desc_;
+  }
+
+  private final static String DEFAULT_TEMP_DIR = "/tmp";
+
+  private static String getTempDir(final String dirName) {
+    try {
+      return Files.createTempDirectory(dirName).toAbsolutePath().toString();
+    } catch(final IOException ioe) {
+      System.err.println("Unable to create temp directory, defaulting to: " +
+          DEFAULT_TEMP_DIR);
+      return DEFAULT_TEMP_DIR + File.separator + dirName;
+    }
+  }
+
+  private static class RandomGenerator {
+    private final byte[] data_;
+    private int dataLength_;
+    private int position_;
+    private double compressionRatio_;
+    Random rand_;
+
+    private RandomGenerator(long seed, double compressionRatio) {
+      // We use a limited amount of data over and over again and ensure
+      // that it is larger than the compression window (32KB), and also
+      // large enough to serve all typical value sizes we want to write.
+      byte[] value = new byte[100];
+      rand_ = new Random(seed);
+      dataLength_ = value.length * 10000;
+      data_ = new byte[dataLength_];
+      compressionRatio_ = compressionRatio;
+      int pos = 0;
+      while (pos < dataLength_) {
+        compressibleBytes(value);
+        System.arraycopy(value, 0, data_, pos,
+            Math.min(value.length, dataLength_ - pos));
+        pos += value.length;
+      }
+    }
+
+    private void compressibleBytes(byte[] value) {
+      int baseLength = value.length;
+      if (compressionRatio_ < 1.0d) {
+        baseLength = (int) (compressionRatio_ * value.length + 0.5);
+      }
+      if (baseLength <= 0) {
+        baseLength = 1;
+      }
+      int pos;
+      for (pos = 0; pos < baseLength; ++pos) {
+        value[pos] = (byte) (' ' + rand_.nextInt(95)); // ' ' .. '~'
+      }
+      while (pos < value.length) {
+        System.arraycopy(value, 0, value, pos,
+            Math.min(baseLength, value.length - pos));
+        pos += baseLength;
+      }
+    }
+
+    private void generate(byte[] value) {
+      if (position_ + value.length > data_.length) {
+        position_ = 0;
+        assert(value.length <= data_.length);
+      }
+      position_ += value.length;
+      System.arraycopy(data_, position_ - value.length,
+          value, 0, value.length);
+    }
+  }
+
+  boolean isFinished() {
+    synchronized(finishLock_) {
+      return isFinished_;
+    }
+  }
+
+  void setFinished(boolean flag) {
+    synchronized(finishLock_) {
+      isFinished_ = flag;
+    }
+  }
+
+  RocksDB db_;
+  final List<String> benchmarks_;
+  final int num_;
+  final int reads_;
+  final int keySize_;
+  final int valueSize_;
+  final int threadNum_;
+  final int writesPerSeconds_;
+  final long randSeed_;
+  final boolean useExisting_;
+  final String databaseDir_;
+  double compressionRatio_;
+  RandomGenerator gen_;
+  long startTime_;
+
+  // env
+  boolean useMemenv_;
+
+  // memtable related
+  final int maxWriteBufferNumber_;
+  final int prefixSize_;
+  final int keysPerPrefix_;
+  final String memtable_;
+  final long hashBucketCount_;
+
+  // sst format related
+  boolean usePlainTable_;
+
+  Object finishLock_;
+  boolean isFinished_;
+  Map<Flag, Object> flags_;
+  // as the lifetime of a static member matches the lifetime of the benchmark,
+  // we let its C++ pointer be disposed in its finalizer.
+  static Options defaultOptions_ = new Options();
+  static BlockBasedTableConfig defaultBlockBasedTableOptions_ =
+      new BlockBasedTableConfig();
+  String compressionType_;
+  CompressionType compression_;
+}
diff --git a/src/rocksdb/java/crossbuild/Vagrantfile b/src/rocksdb/java/crossbuild/Vagrantfile
new file mode 100644
index 000000000..0ee50de2c
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/Vagrantfile
@@ -0,0 +1,51 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
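+
+# The boxes below cross-build the RocksJava native library for four targets:
+# 32-bit and 64-bit glibc (CentOS 6.10) and 32-bit and 64-bit musl (Alpine 3.6).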
+VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + + config.vm.define "linux32" do |linux32| + linux32.vm.box = "bento/centos-6.10-i386" + linux32.vm.provision :shell, path: "build-linux-centos.sh" + end + + config.vm.define "linux64" do |linux64| + linux64.vm.box = "bento/centos-6.10" + linux64.vm.provision :shell, path: "build-linux-centos.sh" + end + + config.vm.define "linux32-musl" do |musl32| + musl32.vm.box = "alpine/alpine32" + musl32.vm.box_version = "3.6.0" + musl32.vm.provision :shell, path: "build-linux-alpine.sh" + end + + config.vm.define "linux64-musl" do |musl64| + musl64.vm.box = "generic/alpine36" + + ## Should use the alpine/alpine64 box, but this issue needs to be fixed first - https://github.com/hashicorp/vagrant/issues/11218 + # musl64.vm.box = "alpine/alpine64" + # musl64.vm.box_version = "3.6.0" + + musl64.vm.provision :shell, path: "build-linux-alpine.sh" + end + + config.vm.provider "virtualbox" do |v| + v.memory = 2048 + v.cpus = 4 + v.customize ["modifyvm", :id, "--nictype1", "virtio" ] + end + + if Vagrant.has_plugin?("vagrant-cachier") + config.cache.scope = :box + end + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.no_install = true + end + + config.vm.synced_folder "../target", "/rocksdb-build" + config.vm.synced_folder "../..", "/rocksdb", type: "rsync" + config.vm.boot_timeout = 1200 +end diff --git a/src/rocksdb/java/crossbuild/build-linux-alpine.sh b/src/rocksdb/java/crossbuild/build-linux-alpine.sh new file mode 100755 index 000000000..561d34141 --- /dev/null +++ b/src/rocksdb/java/crossbuild/build-linux-alpine.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +set -e + +# update Alpine with latest versions +echo '@edge http://nl.alpinelinux.org/alpine/edge/main' >> /etc/apk/repositories +echo '@community http://nl.alpinelinux.org/alpine/edge/community' >> /etc/apk/repositories +apk update +apk upgrade + +# install CA certificates +apk add ca-certificates + +# install build tools +apk add \ + build-base \ + coreutils \ + file \ + git \ + perl \ + automake \ + autoconf \ + cmake + +# install tool dependencies for building RocksDB static library +apk add \ + curl \ + bash \ + wget \ + tar \ + openssl + +# install RocksDB dependencies +apk add \ + snappy snappy-dev \ + zlib zlib-dev \ + bzip2 bzip2-dev \ + lz4 lz4-dev \ + zstd zstd-dev \ + linux-headers \ + jemalloc jemalloc-dev + +# install OpenJDK7 +apk add openjdk7 \ + && apk add java-cacerts \ + && rm /usr/lib/jvm/java-1.7-openjdk/jre/lib/security/cacerts \ + && ln -s /etc/ssl/certs/java/cacerts /usr/lib/jvm/java-1.7-openjdk/jre/lib/security/cacerts + +# cleanup +rm -rf /var/cache/apk/* + +# puts javac in the PATH +export JAVA_HOME=/usr/lib/jvm/java-1.7-openjdk +export PATH=/usr/lib/jvm/java-1.7-openjdk/bin:$PATH + +# gflags from source +cd /tmp &&\ + git clone -b v2.0 --single-branch https://github.com/gflags/gflags.git &&\ + cd gflags &&\ + ./configure --prefix=/usr && make && make install &&\ + rm -rf /tmp/* + + +# build rocksdb +cd /rocksdb +make jclean clean +PORTABLE=1 make -j8 rocksdbjavastatic +cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build +cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build diff --git a/src/rocksdb/java/crossbuild/build-linux-centos.sh b/src/rocksdb/java/crossbuild/build-linux-centos.sh new file mode 100755 index 000000000..176e3456c --- /dev/null +++ b/src/rocksdb/java/crossbuild/build-linux-centos.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# 
Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+
+set -e
+
+# remove the fixed releasever variable present in the hashicorp boxes
+sudo rm -f /etc/yum/vars/releasever
+
+# enable EPEL
+sudo yum -y install epel-release
+
+# install all required packages for rocksdb that are available through yum
+sudo yum -y install openssl java-1.7.0-openjdk-devel zlib-devel bzip2-devel lz4-devel snappy-devel libzstd-devel jemalloc-devel cmake3
+
+# set up cmake3 as cmake binary
+sudo alternatives --install /usr/local/bin/cmake cmake /usr/bin/cmake 10 --slave /usr/local/bin/ctest ctest /usr/bin/ctest --slave /usr/local/bin/cpack cpack /usr/bin/cpack --slave /usr/local/bin/ccmake ccmake /usr/bin/ccmake
+sudo alternatives --install /usr/local/bin/cmake cmake /usr/bin/cmake3 20 --slave /usr/local/bin/ctest ctest /usr/bin/ctest3 --slave /usr/local/bin/cpack cpack /usr/bin/cpack3 --slave /usr/local/bin/ccmake ccmake /usr/bin/ccmake3
+
+# install gcc/g++ 4.8.2 from tru/devtools-2
+sudo wget -O /etc/yum.repos.d/devtools-2.repo https://people.centos.org/tru/devtools-2/devtools-2.repo
+sudo yum -y install devtoolset-2-binutils devtoolset-2-gcc devtoolset-2-gcc-c++
+
+# install gflags
+wget https://github.com/gflags/gflags/archive/v2.0.tar.gz -O gflags-2.0.tar.gz
+tar xvfz gflags-2.0.tar.gz; cd gflags-2.0; scl enable devtoolset-2 ./configure; scl enable devtoolset-2 make; sudo make install
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
+
+# set java home so we can build rocksdb jars
+export JAVA_HOME=/usr/lib/jvm/java-1.7.0
+
+export PATH=$JAVA_HOME:/usr/local/bin:$PATH
+
+# build rocksdb
+cd /rocksdb
+scl enable devtoolset-2 'make clean-not-downloaded'
+scl enable devtoolset-2 'PORTABLE=1 make -j8 rocksdbjavastatic'
+cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
+cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
diff --git a/src/rocksdb/java/crossbuild/build-linux.sh b/src/rocksdb/java/crossbuild/build-linux.sh
new file mode 100755
index 000000000..74178adb5
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/build-linux.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# install all required packages for rocksdb
+sudo apt-get update
+sudo apt-get -y install git make gcc g++ libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev default-jdk
+
+# set java home so we can build rocksdb jars
+export JAVA_HOME=$(echo /usr/lib/jvm/java-7-openjdk*)
+cd /rocksdb
+make jclean clean
+make -j 4 rocksdbjavastatic
+cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
+cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
+sudo shutdown -h now
+
diff --git a/src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh b/src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh
new file mode 100755
index 000000000..e3e852efe
--- /dev/null
+++ b/src/rocksdb/java/crossbuild/docker-build-linux-alpine.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
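+#
+# The build runs against a container-local copy of the sources (copied from
+# /rocksdb-host, which the docker invocation is assumed to mount), so the
+# host checkout stays clean.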
+ +set -e +#set -x + +# just in-case this is run outside Docker +mkdir -p /rocksdb-local-build + +rm -rf /rocksdb-local-build/* +cp -r /rocksdb-host/* /rocksdb-local-build +cd /rocksdb-local-build + +make clean-not-downloaded +PORTABLE=1 make -j2 rocksdbjavastatic + +cp java/target/librocksdbjni-linux*.so java/target/rocksdbjni-*-linux*.jar java/target/rocksdbjni-*-linux*.jar.sha1 /rocksdb-java-target diff --git a/src/rocksdb/java/crossbuild/docker-build-linux-centos.sh b/src/rocksdb/java/crossbuild/docker-build-linux-centos.sh new file mode 100755 index 000000000..16581dec7 --- /dev/null +++ b/src/rocksdb/java/crossbuild/docker-build-linux-centos.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +set -e +#set -x + +# just in-case this is run outside Docker +mkdir -p /rocksdb-local-build + +rm -rf /rocksdb-local-build/* +cp -r /rocksdb-host/* /rocksdb-local-build +cd /rocksdb-local-build + +# Use scl devtoolset if available +if hash scl 2>/dev/null; then + if scl --list | grep -q 'devtoolset-8'; then + # CentOS 6+ + scl enable devtoolset-8 'make clean-not-downloaded' + scl enable devtoolset-8 'PORTABLE=1 make -j2 rocksdbjavastatic' + elif scl --list | grep -q 'devtoolset-7'; then + # CentOS 6+ + scl enable devtoolset-7 'make clean-not-downloaded' + scl enable devtoolset-7 'PORTABLE=1 make -j2 rocksdbjavastatic' + elif scl --list | grep -q 'devtoolset-2'; then + # CentOS 5 or 6 + scl enable devtoolset-2 'make clean-not-downloaded' + scl enable devtoolset-2 'PORTABLE=1 make -j2 rocksdbjavastatic' + else + echo "Could not find devtoolset" + exit 1; + fi +else + make clean-not-downloaded + PORTABLE=1 make -j2 rocksdbjavastatic +fi + +cp java/target/librocksdbjni-linux*.so java/target/rocksdbjni-*-linux*.jar java/target/rocksdbjni-*-linux*.jar.sha1 /rocksdb-java-target + diff --git a/src/rocksdb/java/jdb_bench.sh b/src/rocksdb/java/jdb_bench.sh new file mode 100755 index 000000000..5dfc385e3 --- /dev/null +++ b/src/rocksdb/java/jdb_bench.sh @@ -0,0 +1,13 @@ +# shellcheck disable=SC2148 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +PLATFORM=64 +if [ `getconf LONG_BIT` != "64" ] +then + PLATFORM=32 +fi + +ROCKS_JAR=`find target -name rocksdbjni*.jar` + +echo "Running benchmark in $PLATFORM-Bit mode." +# shellcheck disable=SC2068 +java -server -d$PLATFORM -XX:NewSize=4m -XX:+AggressiveOpts -Djava.library.path=target -cp "${ROCKS_JAR}:benchmark/target/classes" org.rocksdb.benchmark.DbBenchmark $@ diff --git a/src/rocksdb/java/jmh/LICENSE-HEADER.txt b/src/rocksdb/java/jmh/LICENSE-HEADER.txt new file mode 100644 index 000000000..365ee653b --- /dev/null +++ b/src/rocksdb/java/jmh/LICENSE-HEADER.txt @@ -0,0 +1,5 @@ +Copyright (c) 2011-present, Facebook, Inc. All rights reserved. + This source code is licensed under both the GPLv2 (found in the + COPYING file in the root directory) and Apache 2.0 License + (found in the LICENSE.Apache file in the root directory). + diff --git a/src/rocksdb/java/jmh/README.md b/src/rocksdb/java/jmh/README.md new file mode 100644 index 000000000..1575ab517 --- /dev/null +++ b/src/rocksdb/java/jmh/README.md @@ -0,0 +1,24 @@ +# JMH Benchmarks for RocksJava + +These are micro-benchmarks for RocksJava functionality, using [JMH (Java Microbenchmark Harness)](https://openjdk.java.net/projects/code-tools/jmh/). + +## Compiling + +**Note**: This uses a specific build of RocksDB that is set in the `` element of the `dependencies` section of the `pom.xml` file. 
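+The element in question is the `<version>` of the `org.rocksdb:rocksdbjni` dependency; in the sources imported here it reads:
+
+```xml
+<dependency>
+    <groupId>org.rocksdb</groupId>
+    <artifactId>rocksdbjni</artifactId>
+    <version>6.27.0-SNAPSHOT</version>
+</dependency>
+```
+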
If you are testing local changes you should build and install a SNAPSHOT version of rocksdbjni, and update the `pom.xml` of the rocksdbjni-jmh module to test with it.
+
+For instance, this is how to install the OSX jar you just built for 6.26.0:
+
+```bash
+$ mvn install:install-file -Dfile=./java/target/rocksdbjni-6.26.0-SNAPSHOT-osx.jar -DgroupId=org.rocksdb -DartifactId=rocksdbjni -Dversion=6.26.0-SNAPSHOT -Dpackaging=jar
+```
+
+Then compile the benchmarks:
+
+```bash
+$ mvn package
+```
+
+## Running
+```bash
+$ java -jar target/rocksdbjni-jmh-1.0-SNAPSHOT-benchmarks.jar
+```
+
+NOTE: you can append `-help` to the command above to see all of the JMH runtime options.
diff --git a/src/rocksdb/java/jmh/pom.xml b/src/rocksdb/java/jmh/pom.xml
new file mode 100644
index 000000000..26615da86
--- /dev/null
+++ b/src/rocksdb/java/jmh/pom.xml
@@ -0,0 +1,138 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.rocksdb</groupId>
+    <artifactId>rocksdbjni-jmh</artifactId>
+    <version>1.0-SNAPSHOT</version>
+
+    <url>http://rocksdb.org/</url>
+
+    <name>rocksdbjni-jmh</name>
+    <description>JMH Benchmarks for RocksDB Java API</description>
+
+    <organization>
+        <name>Facebook, Inc.</name>
+        <url>https://www.facebook.com</url>
+    </organization>
+
+    <licenses>
+        <license>
+            <name>Apache License 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
+            <distribution>repo</distribution>
+        </license>
+        <license>
+            <name>GNU General Public License, version 2</name>
+            <url>http://www.gnu.org/licenses/gpl-2.0.html</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <scm>
+        <connection>scm:git:git://github.com/facebook/rocksdb.git</connection>
+        <developerConnection>scm:git:git@github.com:facebook/rocksdb.git</developerConnection>
+        <url>http://github.com/facebook/rocksdb/</url>
+    </scm>
+
+    <properties>
+        <project.build.source>1.7</project.build.source>
+        <project.build.target>1.7</project.build.target>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+
+        <jmh.version>1.22</jmh.version>
+        <uberjar.name>benchmarks</uberjar.name>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.rocksdb</groupId>
+            <artifactId>rocksdbjni</artifactId>
+            <version>6.27.0-SNAPSHOT</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.openjdk.jmh</groupId>
+            <artifactId>jmh-core</artifactId>
+            <version>${jmh.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.openjdk.jmh</groupId>
+            <artifactId>jmh-generator-annprocess</artifactId>
+            <version>${jmh.version}</version>
+            <scope>provided</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.8.1</version>
+                <configuration>
+                    <source>${project.build.source}</source>
+                    <target>${project.build.target}</target>
+                    <encoding>${project.build.sourceEncoding}</encoding>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>com.mycila</groupId>
+                <artifactId>license-maven-plugin</artifactId>
+                <version>3.0</version>
+                <inherited>true</inherited>
+                <configuration>
+                    <header>LICENSE-HEADER.txt</header>
+                    <failIfMissing>true</failIfMissing>
+                    <aggregate>true</aggregate>
+                    <strictCheck>true</strictCheck>
+                    <excludes>
+                        <exclude>pom.xml</exclude>
+                    </excludes>
+                    <encoding>${project.build.sourceEncoding}</encoding>
+                </configuration>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>3.2.1</version>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <finalName>${project.artifactId}-${project.version}-${uberjar.name}</finalName>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass>org.openjdk.jmh.Main</mainClass>
+                                </transformer>
+                            </transformers>
+                            <filters>
+                                <filter>
+                                    <artifact>*:*</artifact>
+                                    <excludes>
+                                        <exclude>META-INF/*.SF</exclude>
+                                        <exclude>META-INF/*.DSA</exclude>
+                                        <exclude>META-INF/*.RSA</exclude>
+                                    </excludes>
+                                </filter>
+                            </filters>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
\ No newline at end of file diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java new file mode 100644 index 000000000..1973b5487 --- /dev/null +++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/ComparatorBenchmarks.java @@ -0,0 +1,139 @@ +/** + * Copyright (c) 2011-present, Facebook, Inc. All rights reserved. + * This source code is licensed under both the GPLv2 (found in the + * COPYING file in the root directory) and Apache 2.0 License + * (found in the LICENSE.Apache file in the root directory). + */ +package org.rocksdb.jmh; + +import org.openjdk.jmh.annotations.*; +import org.rocksdb.*; +import org.rocksdb.util.BytewiseComparator; +import org.rocksdb.util.FileUtils; +import org.rocksdb.util.ReverseBytewiseComparator; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.rocksdb.util.KVUtils.ba; + +@State(Scope.Benchmark) +public class ComparatorBenchmarks { + + @Param({ + "native_bytewise", + "native_reverse_bytewise", + + "java_bytewise_non-direct_reused-64_adaptive-mutex", + "java_bytewise_non-direct_reused-64_non-adaptive-mutex", + "java_bytewise_non-direct_reused-64_thread-local", + "java_bytewise_direct_reused-64_adaptive-mutex", + "java_bytewise_direct_reused-64_non-adaptive-mutex", + "java_bytewise_direct_reused-64_thread-local", + "java_bytewise_non-direct_no-reuse", + "java_bytewise_direct_no-reuse", + + "java_reverse_bytewise_non-direct_reused-64_adaptive-mutex", + "java_reverse_bytewise_non-direct_reused-64_non-adaptive-mutex", + "java_reverse_bytewise_non-direct_reused-64_thread-local", + "java_reverse_bytewise_direct_reused-64_adaptive-mutex", + "java_reverse_bytewise_direct_reused-64_non-adaptive-mutex", + "java_reverse_bytewise_direct_reused-64_thread-local", + "java_reverse_bytewise_non-direct_no-reuse", + "java_reverse_bytewise_direct_no-reuse" + }) + public String comparatorName; + + Path dbDir; + ComparatorOptions comparatorOptions; + AbstractComparator comparator; + Options options; + RocksDB db; + + @Setup(Level.Trial) + public void setup() throws IOException, RocksDBException { + RocksDB.loadLibrary(); + + dbDir = Files.createTempDirectory("rocksjava-comparator-benchmarks"); + + options = new Options() + .setCreateIfMissing(true); + + if ("native_bytewise".equals(comparatorName)) { + options.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR); + + } else if ("native_reverse_bytewise".equals(comparatorName)) { + options.setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR); + + } else if (comparatorName.startsWith("java_")) { + comparatorOptions = new ComparatorOptions(); + + if (comparatorName.indexOf("non-direct") > -1) { + comparatorOptions.setUseDirectBuffer(false); + } else if (comparatorName.indexOf("direct") > -1) { + comparatorOptions.setUseDirectBuffer(true); + } + + if (comparatorName.indexOf("no-reuse") > -1) { + comparatorOptions.setMaxReusedBufferSize(-1); + } else if (comparatorName.indexOf("_reused-") > -1) { + final int idx = comparatorName.indexOf("_reused-"); + String s = comparatorName.substring(idx + 8); + s = s.substring(0, s.indexOf('_')); + comparatorOptions.setMaxReusedBufferSize(Integer.parseInt(s)); + } + + if (comparatorName.indexOf("non-adaptive-mutex") > -1) { + comparatorOptions.setReusedSynchronisationType(ReusedSynchronisationType.MUTEX); + } else if (comparatorName.indexOf("adaptive-mutex") > -1) { + 
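+      // NOTE: "adaptive-mutex" is also a substring of "non-adaptive-mutex",
+      // so this branch relies on the non-adaptive check having matched first.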
comparatorOptions.setReusedSynchronisationType(ReusedSynchronisationType.ADAPTIVE_MUTEX); + } else if (comparatorName.indexOf("thread-local") > -1) { + comparatorOptions.setReusedSynchronisationType(ReusedSynchronisationType.THREAD_LOCAL); + } + + if (comparatorName.startsWith("java_bytewise")) { + comparator = new BytewiseComparator(comparatorOptions); + } else if (comparatorName.startsWith("java_reverse_bytewise")) { + comparator = new ReverseBytewiseComparator(comparatorOptions); + } + + options.setComparator(comparator); + + } else { + throw new IllegalArgumentException("Unknown comparatorName: " + comparatorName); + } + + db = RocksDB.open(options, dbDir.toAbsolutePath().toString()); + } + + @TearDown(Level.Trial) + public void cleanup() throws IOException { + db.close(); + if (comparator != null) { + comparator.close(); + } + if (comparatorOptions != null) { + comparatorOptions.close(); + } + options.close(); + FileUtils.delete(dbDir); + } + + @State(Scope.Benchmark) + public static class Counter { + private final AtomicInteger count = new AtomicInteger(); + + public int next() { + return count.getAndIncrement(); + } + } + + + @Benchmark + public void put(final Counter counter) throws RocksDBException { + final int i = counter.next(); + db.put(ba("key" + i), ba("value" + i)); + } +} diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java new file mode 100644 index 000000000..e34005c2f --- /dev/null +++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/GetBenchmarks.java @@ -0,0 +1,139 @@ +/** + * Copyright (c) 2011-present, Facebook, Inc. All rights reserved. + * This source code is licensed under both the GPLv2 (found in the + * COPYING file in the root directory) and Apache 2.0 License + * (found in the LICENSE.Apache file in the root directory). 
+ */
+package org.rocksdb.jmh;
+
+import org.openjdk.jmh.annotations.*;
+import org.rocksdb.*;
+import org.rocksdb.util.FileUtils;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.rocksdb.util.KVUtils.ba;
+
+@State(Scope.Benchmark)
+public class GetBenchmarks {
+
+  @Param({
+      "no_column_family",
+      "1_column_family",
+      "20_column_families",
+      "100_column_families"
+  })
+  String columnFamilyTestType;
+
+  @Param("100000")
+  int keyCount;
+
+  Path dbDir;
+  DBOptions options;
+  int cfs = 0;  // number of column families
+  private AtomicInteger cfHandlesIdx;
+  ColumnFamilyHandle[] cfHandles;
+  RocksDB db;
+  private final AtomicInteger keyIndex = new AtomicInteger();
+
+  @Setup(Level.Trial)
+  public void setup() throws IOException, RocksDBException {
+    RocksDB.loadLibrary();
+
+    dbDir = Files.createTempDirectory("rocksjava-get-benchmarks");
+
+    options = new DBOptions()
+        .setCreateIfMissing(true)
+        .setCreateMissingColumnFamilies(true);
+
+    final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+    cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+
+    if ("1_column_family".equals(columnFamilyTestType)) {
+      cfs = 1;
+    } else if ("20_column_families".equals(columnFamilyTestType)) {
+      cfs = 20;
+    } else if ("100_column_families".equals(columnFamilyTestType)) {
+      cfs = 100;
+    }
+
+    if (cfs > 0) {
+      cfHandlesIdx = new AtomicInteger(1);
+      for (int i = 1; i <= cfs; i++) {
+        cfDescriptors.add(new ColumnFamilyDescriptor(ba("cf" + i)));
+      }
+    }
+
+    final List<ColumnFamilyHandle> cfHandlesList = new ArrayList<>(cfDescriptors.size());
+    db = RocksDB.open(options, dbDir.toAbsolutePath().toString(), cfDescriptors, cfHandlesList);
+    cfHandles = cfHandlesList.toArray(new ColumnFamilyHandle[0]);
+
+    // store initial data for retrieving via get
+    for (int i = 0; i < cfs; i++) {
+      for (int j = 0; j < keyCount; j++) {
+        db.put(cfHandles[i], ba("key" + j), ba("value" + j));
+      }
+    }
+
+    try (final FlushOptions flushOptions = new FlushOptions()
+        .setWaitForFlush(true)) {
+      db.flush(flushOptions);
+    }
+  }
+
+  @TearDown(Level.Trial)
+  public void cleanup() throws IOException {
+    for (final ColumnFamilyHandle cfHandle : cfHandles) {
+      cfHandle.close();
+    }
+    db.close();
+    options.close();
+    FileUtils.delete(dbDir);
+  }
+
+  private ColumnFamilyHandle getColumnFamily() {
+    if (cfs == 0) {
+      return cfHandles[0];
+    } else if (cfs == 1) {
+      return cfHandles[1];
+    } else {
+      int idx = cfHandlesIdx.getAndIncrement();
+      if (idx > cfs) {
+        cfHandlesIdx.set(1); // doesn't ensure a perfect distribution, but it's ok
+        idx = 0;
+      }
+      return cfHandles[idx];
+    }
+  }
+
+  /**
+   * Takes the next position in the index.
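+   * Implemented as a compare-and-set retry loop so that concurrent benchmark
+   * threads each claim a distinct index, wrapping back to 0 at keyCount.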
+ */ + private int next() { + int idx; + int nextIdx; + while (true) { + idx = keyIndex.get(); + nextIdx = idx + 1; + if (nextIdx >= keyCount) { + nextIdx = 0; + } + + if (keyIndex.compareAndSet(idx, nextIdx)) { + break; + } + } + return idx; + } + + @Benchmark + public byte[] get() throws RocksDBException { + final int keyIdx = next(); + return db.get(getColumnFamily(), ba("key" + keyIdx)); + } +} diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java new file mode 100644 index 000000000..c8c827444 --- /dev/null +++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/MultiGetBenchmarks.java @@ -0,0 +1,232 @@ +/** + * Copyright (c) 2011-present, Facebook, Inc. All rights reserved. + * This source code is licensed under both the GPLv2 (found in the + * COPYING file in the root directory) and Apache 2.0 License + * (found in the LICENSE.Apache file in the root directory). + */ +package org.rocksdb.jmh; + +import static org.rocksdb.util.KVUtils.ba; +import static org.rocksdb.util.KVUtils.keys; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import org.openjdk.jmh.annotations.*; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.OptionsBuilder; +import org.rocksdb.*; +import org.rocksdb.util.FileUtils; + +@State(Scope.Thread) +public class MultiGetBenchmarks { + @Param({ + "no_column_family", + "1_column_family", + "20_column_families", + "100_column_families" + }) + String columnFamilyTestType; + + @Param({"10000", "25000", "100000"}) int keyCount; + + @Param({ + "10", + "100", + "1000", + "10000", + }) + int multiGetSize; + + @Param({"16", "64", "250", "1000", "4000", "16000"}) int valueSize; + @Param({"16"}) int keySize; // big enough + + Path dbDir; + DBOptions options; + int cfs = 0; // number of column families + private AtomicInteger cfHandlesIdx; + ColumnFamilyHandle[] cfHandles; + RocksDB db; + private final AtomicInteger keyIndex = new AtomicInteger(); + + @Setup(Level.Trial) + public void setup() throws IOException, RocksDBException { + RocksDB.loadLibrary(); + + dbDir = Files.createTempDirectory("rocksjava-multiget-benchmarks"); + + options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + + final List cfDescriptors = new ArrayList<>(); + cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); + + if ("1_column_family".equals(columnFamilyTestType)) { + cfs = 1; + } else if ("20_column_families".equals(columnFamilyTestType)) { + cfs = 20; + } else if ("100_column_families".equals(columnFamilyTestType)) { + cfs = 100; + } + + if (cfs > 0) { + cfHandlesIdx = new AtomicInteger(1); + for (int i = 1; i <= cfs; i++) { + cfDescriptors.add(new ColumnFamilyDescriptor(ba("cf" + i))); + } + } + + final List cfHandlesList = new ArrayList<>(cfDescriptors.size()); + db = RocksDB.open(options, dbDir.toAbsolutePath().toString(), cfDescriptors, cfHandlesList); + cfHandles = cfHandlesList.toArray(new ColumnFamilyHandle[0]); + + // store initial data for retrieving via get + for (int i = 0; i < cfs; i++) { + for (int j = 0; j < keyCount; j++) { + final byte[] paddedValue = Arrays.copyOf(ba("value" + j), valueSize); + db.put(cfHandles[i], ba("key" + j), 
paddedValue);
+      }
+    }
+
+    try (final FlushOptions flushOptions = new FlushOptions()
+            .setWaitForFlush(true)) {
+      db.flush(flushOptions);
+    }
+  }
+
+  @TearDown(Level.Trial)
+  public void cleanup() throws IOException {
+    for (final ColumnFamilyHandle cfHandle : cfHandles) {
+      cfHandle.close();
+    }
+    db.close();
+    options.close();
+    FileUtils.delete(dbDir);
+  }
+
+  private ColumnFamilyHandle getColumnFamily() {
+    if (cfs == 0) {
+      return cfHandles[0];
+    } else if (cfs == 1) {
+      return cfHandles[1];
+    } else {
+      int idx = cfHandlesIdx.getAndIncrement();
+      if (idx > cfs) {
+        cfHandlesIdx.set(1); // doesn't ensure a perfect distribution, but it's ok
+        idx = 0;
+      }
+      return cfHandles[idx];
+    }
+  }
+
+  /**
+   * Reserves the next {@code inc} positions in the index.
+   *
+   * @param inc the number by which to increment the index
+   * @param limit the limit for the index
+   * @return the index before {@code inc} is added
+   */
+  private int next(final int inc, final int limit) {
+    int idx;
+    int nextIdx;
+    while (true) {
+      idx = keyIndex.get();
+      nextIdx = idx + inc;
+      if (nextIdx >= limit) {
+        nextIdx = inc;
+      }
+
+      if (keyIndex.compareAndSet(idx, nextIdx)) {
+        break;
+      }
+    }
+
+    if (nextIdx >= limit) {
+      return -1;
+    } else {
+      return idx;
+    }
+  }
+
+  ByteBuffer keysBuffer;
+  ByteBuffer valuesBuffer;
+
+  List<ByteBuffer> valueBuffersList;
+  List<ByteBuffer> keyBuffersList;
+
+  @Setup
+  public void allocateSliceBuffers() {
+    // size the keys buffer by keySize (not valueSize), and position each
+    // buffer *before* slicing so that slice i starts at offset i * size
+    keysBuffer = ByteBuffer.allocateDirect(keyCount * keySize);
+    valuesBuffer = ByteBuffer.allocateDirect(keyCount * valueSize);
+    valueBuffersList = new ArrayList<>();
+    keyBuffersList = new ArrayList<>();
+    for (int i = 0; i < keyCount; i++) {
+      valuesBuffer.position(i * valueSize);
+      valueBuffersList.add(valuesBuffer.slice());
+      keysBuffer.position(i * keySize);
+      keyBuffersList.add(keysBuffer.slice());
+    }
+  }
+
+  @TearDown
+  public void freeSliceBuffers() {
+    valueBuffersList.clear();
+  }
+
+  @Benchmark
+  public List<byte[]> multiGet10() throws RocksDBException {
+    final int fromKeyIdx = next(multiGetSize, keyCount);
+    if (fromKeyIdx >= 0) {
+      final List<byte[]> keys = keys(fromKeyIdx, fromKeyIdx + multiGetSize);
+      final List<byte[]> valueResults = db.multiGetAsList(keys);
+      for (final byte[] result : valueResults) {
+        if (result.length != valueSize)
+          throw new RuntimeException("Test valueSize assumption wrong");
+      }
+    }
+    return new ArrayList<>();
+  }
+
+  @Benchmark
+  public List<RocksDB.MultiGetInstance> multiGetDirect10() throws RocksDBException {
+    final int fromKeyIdx = next(multiGetSize, keyCount);
+    if (fromKeyIdx >= 0) {
+      final List<ByteBuffer> keys = keys(keyBuffersList, fromKeyIdx, fromKeyIdx + multiGetSize);
+      final List<RocksDB.MultiGetInstance> results = db.multiGetByteBuffers(
+          keys, valueBuffersList.subList(fromKeyIdx, fromKeyIdx + multiGetSize));
+      for (final RocksDB.MultiGetInstance result : results) {
+        if (result.status.getCode() != Status.Code.Ok)
+          throw new RuntimeException("Test status assumption wrong");
+        if (result.valueSize != valueSize)
+          throw new RuntimeException("Test valueSize assumption wrong");
+      }
+      return results;
+    }
+    return new ArrayList<>();
+  }
+
+  public static void main(final String[] args) throws RunnerException {
+    final org.openjdk.jmh.runner.options.Options opt =
+        new OptionsBuilder()
+            .include(MultiGetBenchmarks.class.getSimpleName())
+            .forks(2)
+            .jvmArgs("-ea")
+            .warmupIterations(1)
+            .measurementIterations(2)
+            .param("columnFamilyTestType", "1_column_family")
+            .param("multiGetSize", "10", "1000")
+            .param("keyCount", "1000")
+            .output("jmh_output")
+            .build();
+
+    new Runner(opt).run();
+  }
+}
diff --git
a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java new file mode 100644 index 000000000..5aae21cb9 --- /dev/null +++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/jmh/PutBenchmarks.java @@ -0,0 +1,112 @@ +/** + * Copyright (c) 2011-present, Facebook, Inc. All rights reserved. + * This source code is licensed under both the GPLv2 (found in the + * COPYING file in the root directory) and Apache 2.0 License + * (found in the LICENSE.Apache file in the root directory). + */ +package org.rocksdb.jmh; + +import org.openjdk.jmh.annotations.*; +import org.rocksdb.*; +import org.rocksdb.util.FileUtils; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.rocksdb.util.KVUtils.ba; + +@State(Scope.Benchmark) +public class PutBenchmarks { + + @Param({ + "no_column_family", + "1_column_family", + "20_column_families", + "100_column_families" + }) + String columnFamilyTestType; + + Path dbDir; + DBOptions options; + int cfs = 0; // number of column families + private AtomicInteger cfHandlesIdx; + ColumnFamilyHandle[] cfHandles; + RocksDB db; + + @Setup(Level.Trial) + public void setup() throws IOException, RocksDBException { + RocksDB.loadLibrary(); + + dbDir = Files.createTempDirectory("rocksjava-put-benchmarks"); + + options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + + final List cfDescriptors = new ArrayList<>(); + cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); + + if ("1_column_family".equals(columnFamilyTestType)) { + cfs = 1; + } else if ("20_column_families".equals(columnFamilyTestType)) { + cfs = 20; + } else if ("100_column_families".equals(columnFamilyTestType)) { + cfs = 100; + } + + if (cfs > 0) { + cfHandlesIdx = new AtomicInteger(1); + for (int i = 1; i <= cfs; i++) { + cfDescriptors.add(new ColumnFamilyDescriptor(ba("cf" + i))); + } + } + + final List cfHandlesList = new ArrayList<>(cfDescriptors.size()); + db = RocksDB.open(options, dbDir.toAbsolutePath().toString(), cfDescriptors, cfHandlesList); + cfHandles = cfHandlesList.toArray(new ColumnFamilyHandle[0]); + } + + @TearDown(Level.Trial) + public void cleanup() throws IOException { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + db.close(); + options.close(); + FileUtils.delete(dbDir); + } + + private ColumnFamilyHandle getColumnFamily() { + if (cfs == 0) { + return cfHandles[0]; + } else if (cfs == 1) { + return cfHandles[1]; + } else { + int idx = cfHandlesIdx.getAndIncrement(); + if (idx > cfs) { + cfHandlesIdx.set(1); // doesn't ensure a perfect distribution, but it's ok + idx = 0; + } + return cfHandles[idx]; + } + } + + @State(Scope.Benchmark) + public static class Counter { + private final AtomicInteger count = new AtomicInteger(); + + public int next() { + return count.getAndIncrement(); + } + } + + @Benchmark + public void put(final ComparatorBenchmarks.Counter counter) throws RocksDBException { + final int i = counter.next(); + db.put(getColumnFamily(), ba("key" + i), ba("value" + i)); + } +} diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java new file mode 100644 index 000000000..63744a14f --- /dev/null +++ 
b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/FileUtils.java
@@ -0,0 +1,59 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.util;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+
+public final class FileUtils {
+  private static final SimpleFileVisitor<Path> DELETE_DIR_VISITOR = new DeleteDirVisitor();
+
+  /**
+   * Deletes a path from the filesystem.
+   *
+   * If the path is a directory, its contents
+   * are recursively deleted before the directory
+   * itself is deleted.
+   *
+   * Note that removal of a directory is not an atomic operation,
+   * so if an error occurs during removal, some of the directory's
+   * descendants may already have been removed.
+   *
+   * @param path the path to delete.
+   *
+   * @throws IOException if an error occurs whilst removing a file or directory
+   */
+  public static void delete(final Path path) throws IOException {
+    if (!Files.isDirectory(path)) {
+      Files.deleteIfExists(path);
+    } else {
+      Files.walkFileTree(path, DELETE_DIR_VISITOR);
+    }
+  }
+
+  private static class DeleteDirVisitor extends SimpleFileVisitor<Path> {
+    @Override
+    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
+      Files.deleteIfExists(file);
+      return FileVisitResult.CONTINUE;
+    }
+
+    @Override
+    public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException {
+      if (exc != null) {
+        throw exc;
+      }
+
+      Files.deleteIfExists(dir);
+      return FileVisitResult.CONTINUE;
+    }
+  }
+}
diff --git a/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java
new file mode 100644
index 000000000..5077291c8
--- /dev/null
+++ b/src/rocksdb/java/jmh/src/main/java/org/rocksdb/util/KVUtils.java
@@ -0,0 +1,72 @@
+/**
+ * Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+ * This source code is licensed under both the GPLv2 (found in the
+ * COPYING file in the root directory) and Apache 2.0 License
+ * (found in the LICENSE.Apache file in the root directory).
+ */
+package org.rocksdb.util;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+public final class KVUtils {
+
+  /**
+   * Get a byte array from a string.
+   *
+   * Assumes UTF-8 encoding.
+   *
+   * @param string the string
+   *
+   * @return the bytes.
+   */
+  public static byte[] ba(final String string) {
+    return string.getBytes(UTF_8);
+  }
+
+  /**
+   * Get a string from a byte array.
+   *
+   * Assumes UTF-8 encoding.
+   *
+   * @param bytes the bytes
+   *
+   * @return the string.
+   */
+  public static String str(final byte[] bytes) {
+    return new String(bytes, UTF_8);
+  }
+
+  /**
+   * Get a list of keys named key{from}..key{to-1},
+   * i.e. the range {@code from} (inclusive) to {@code to} (exclusive).
+ * + * @param from the first key + * @param to the last key + * + * @return the array of keys + */ + public static List keys(final int from, final int to) { + final List keys = new ArrayList<>(to - from); + for (int i = from; i < to; i++) { + keys.add(ba("key" + i)); + } + return keys; + } + + public static List keys( + final List keyBuffers, final int from, final int to) { + final List keys = new ArrayList<>(to - from); + for (int i = from; i < to; i++) { + final ByteBuffer key = keyBuffers.get(i); + key.clear(); + key.put(ba("key" + i)); + key.flip(); + keys.add(key); + } + return keys; + } +} diff --git a/src/rocksdb/java/pom.xml.template b/src/rocksdb/java/pom.xml.template new file mode 100644 index 000000000..4abff4768 --- /dev/null +++ b/src/rocksdb/java/pom.xml.template @@ -0,0 +1,178 @@ + + + 4.0.0 + + org.rocksdb + rocksdbjni + ${ROCKSDB_JAVA_VERSION} + + RocksDB JNI + RocksDB fat jar that contains .so files for linux32 and linux64 (glibc and musl-libc), jnilib files + for Mac OSX, and a .dll for Windows x64. + + https://rocksdb.org + 2012 + + + + Apache License 2.0 + http://www.apache.org/licenses/LICENSE-2.0.html + repo + + + GNU General Public License, version 2 + http://www.gnu.org/licenses/gpl-2.0.html + repo + + + + + scm:git:https://github.com/facebook/rocksdb.git + scm:git:https://github.com/facebook/rocksdb.git + scm:git:https://github.com/facebook/rocksdb.git + + + + Facebook + https://www.facebook.com + + + + + Facebook + help@facebook.com + America/New_York + + architect + + + + + + + rocksdb - Google Groups + rocksdb-subscribe@googlegroups.com + rocksdb-unsubscribe@googlegroups.com + rocksdb@googlegroups.com + https://groups.google.com/forum/#!forum/rocksdb + + + + + 1.7 + 1.7 + UTF-8 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.2 + + ${project.build.source} + ${project.build.target} + ${project.build.sourceEncoding} + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.18.1 + + ${argLine} -ea -Xcheck:jni -Djava.library.path=${project.build.directory} + false + false + + ${project.build.directory}/* + + + + + org.jacoco + jacoco-maven-plugin + 0.7.2.201409121644 + + + + prepare-agent + + + + report + prepare-package + + report + + + + + + org.codehaus.gmaven + groovy-maven-plugin + 2.0 + + + process-classes + + execute + + + + Xenu + + + String fileContents = new File(project.basedir.absolutePath + '/../include/rocksdb/version.h').getText('UTF-8') + matcher = (fileContents =~ /(?s).*ROCKSDB_MAJOR ([0-9]+).*?/) + String major_version = matcher.getAt(0).getAt(1) + matcher = (fileContents =~ /(?s).*ROCKSDB_MINOR ([0-9]+).*?/) + String minor_version = matcher.getAt(0).getAt(1) + matcher = (fileContents =~ /(?s).*ROCKSDB_PATCH ([0-9]+).*?/) + String patch_version = matcher.getAt(0).getAt(1) + String version = String.format('%s.%s.%s', major_version, minor_version, patch_version) + // Set version to be used in pom.properties + project.version = version + // Set version to be set as jar name + project.build.finalName = project.artifactId + "-" + version + + + + + + + + + + + junit + junit + 4.13.1 + test + + + org.hamcrest + hamcrest + 2.2 + test + + + cglib + cglib + 3.3.0 + test + + + org.assertj + assertj-core + 2.9.0 + test + + + org.mockito + mockito-all + 1.10.19 + test + + + diff --git a/src/rocksdb/java/rocksjni/backup_engine_options.cc b/src/rocksdb/java/rocksjni/backup_engine_options.cc new file mode 100644 index 000000000..25bfb6720 --- /dev/null +++ b/src/rocksdb/java/rocksjni/backup_engine_options.cc @@ -0,0 +1,365 @@ +// Copyright 
(c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::BackupEngine and
+// ROCKSDB_NAMESPACE::BackupEngineOptions methods from the Java side.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <string>
+#include <vector>
+
+#include "include/org_rocksdb_BackupEngineOptions.h"
+#include "rocksdb/utilities/backup_engine.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+///////////////////////////////////////////////////////////////////////////
+// BackupEngineOptions
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    newBackupEngineOptions
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_BackupEngineOptions_newBackupEngineOptions(
+    JNIEnv* env, jclass /*jcls*/, jstring jpath) {
+  const char* cpath = env->GetStringUTFChars(jpath, nullptr);
+  if (cpath == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return 0;
+  }
+  auto* bopt = new ROCKSDB_NAMESPACE::BackupEngineOptions(cpath);
+  env->ReleaseStringUTFChars(jpath, cpath);
+  return GET_CPLUSPLUS_POINTER(bopt);
+}
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    backupDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_BackupEngineOptions_backupDir(JNIEnv* env,
+                                                       jobject /*jopt*/,
+                                                       jlong jhandle) {
+  auto* bopt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngineOptions*>(jhandle);
+  return env->NewStringUTF(bopt->backup_dir.c_str());
+}
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    setBackupEnv
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupEngineOptions_setBackupEnv(
+    JNIEnv* /*env*/, jobject /*jopt*/, jlong jhandle, jlong jrocks_env_handle) {
+  auto* bopt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngineOptions*>(jhandle);
+  auto* rocks_env =
+      reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jrocks_env_handle);
+  bopt->backup_env = rocks_env;
+}
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    setShareTableFiles
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_BackupEngineOptions_setShareTableFiles(JNIEnv* /*env*/,
+                                                             jobject /*jobj*/,
+                                                             jlong jhandle,
+                                                             jboolean flag) {
+  auto* bopt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngineOptions*>(jhandle);
+  bopt->share_table_files = flag;
+}
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    shareTableFiles
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_BackupEngineOptions_shareTableFiles(JNIEnv* /*env*/,
+                                                              jobject /*jobj*/,
+                                                              jlong jhandle) {
+  auto* bopt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngineOptions*>(jhandle);
+  return bopt->share_table_files;
+}
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    setInfoLog
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_BackupEngineOptions_setInfoLog(JNIEnv* /*env*/,
+                                                     jobject /*jobj*/,
+                                                     jlong jhandle,
+                                                     jlong jlogger_handle) {
+  auto* bopt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngineOptions*>(jhandle);
+  // cast the logger handle, not the options handle
+  auto* sptr_logger =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+          jlogger_handle);
+  bopt->info_log = sptr_logger->get();
+}
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    setSync
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_BackupEngineOptions_setSync(JNIEnv* /*env*/,
+                                                  jobject /*jobj*/,
+                                                  jlong jhandle,
+                                                  jboolean flag) {
+  auto* bopt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngineOptions*>(jhandle);
+  bopt->sync = flag;
+}
+
+/*
+ * Class:     org_rocksdb_BackupEngineOptions
+ * Method:    sync
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_BackupEngineOptions_sync(JNIEnv* /*env*/,
+                                                   jobject /*jobj*/,
+                                                   jlong jhandle) {
+  auto* bopt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::BackupEngineOptions*>(jhandle);
+  return
bopt->sync; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setDestroyOldData + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupEngineOptions_setDestroyOldData(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jboolean flag) { + auto* bopt = + reinterpret_cast(jhandle); + bopt->destroy_old_data = flag; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: destroyOldData + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupEngineOptions_destroyOldData(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + return bopt->destroy_old_data; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setBackupLogFiles + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupEngineOptions_setBackupLogFiles(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jboolean flag) { + auto* bopt = + reinterpret_cast(jhandle); + bopt->backup_log_files = flag; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: backupLogFiles + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupEngineOptions_backupLogFiles(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + return bopt->backup_log_files; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setBackupRateLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupEngineOptions_setBackupRateLimit( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jbackup_rate_limit) { + auto* bopt = + reinterpret_cast(jhandle); + bopt->backup_rate_limit = jbackup_rate_limit; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: backupRateLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_BackupEngineOptions_backupRateLimit(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + return bopt->backup_rate_limit; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setBackupRateLimiter + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupEngineOptions_setBackupRateLimiter( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jrate_limiter_handle) { + auto* bopt = + reinterpret_cast(jhandle); + auto* sptr_rate_limiter = + reinterpret_cast*>( + jrate_limiter_handle); + bopt->backup_rate_limiter = *sptr_rate_limiter; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setRestoreRateLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupEngineOptions_setRestoreRateLimit( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jrestore_rate_limit) { + auto* bopt = + reinterpret_cast(jhandle); + bopt->restore_rate_limit = jrestore_rate_limit; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: restoreRateLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_BackupEngineOptions_restoreRateLimit(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + return bopt->restore_rate_limit; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setRestoreRateLimiter + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupEngineOptions_setRestoreRateLimiter( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jrate_limiter_handle) { + auto* bopt = + reinterpret_cast(jhandle); + auto* sptr_rate_limiter = + reinterpret_cast*>( + jrate_limiter_handle); + bopt->restore_rate_limiter = *sptr_rate_limiter; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setShareFilesWithChecksum + * Signature: (JZ)V + */ +void 
Java_org_rocksdb_BackupEngineOptions_setShareFilesWithChecksum( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean flag) { + auto* bopt = + reinterpret_cast(jhandle); + bopt->share_files_with_checksum = flag; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: shareFilesWithChecksum + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupEngineOptions_shareFilesWithChecksum( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + return bopt->share_files_with_checksum; +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setMaxBackgroundOperations + * Signature: (JI)V + */ +void Java_org_rocksdb_BackupEngineOptions_setMaxBackgroundOperations( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jint max_background_operations) { + auto* bopt = + reinterpret_cast(jhandle); + bopt->max_background_operations = static_cast(max_background_operations); +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: maxBackgroundOperations + * Signature: (J)I + */ +jint Java_org_rocksdb_BackupEngineOptions_maxBackgroundOperations( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + return static_cast(bopt->max_background_operations); +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: setCallbackTriggerIntervalSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupEngineOptions_setCallbackTriggerIntervalSize( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jcallback_trigger_interval_size) { + auto* bopt = + reinterpret_cast(jhandle); + bopt->callback_trigger_interval_size = + static_cast(jcallback_trigger_interval_size); +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: callbackTriggerIntervalSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_BackupEngineOptions_callbackTriggerIntervalSize( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + return static_cast(bopt->callback_trigger_interval_size); +} + +/* + * Class: org_rocksdb_BackupEngineOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_BackupEngineOptions_disposeInternal(JNIEnv* /*env*/, + jobject /*jopt*/, + jlong jhandle) { + auto* bopt = + reinterpret_cast(jhandle); + assert(bopt != nullptr); + delete bopt; +} diff --git a/src/rocksdb/java/rocksjni/backupenginejni.cc b/src/rocksdb/java/rocksjni/backupenginejni.cc new file mode 100644 index 000000000..1ba7ea286 --- /dev/null +++ b/src/rocksdb/java/rocksjni/backupenginejni.cc @@ -0,0 +1,279 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling C++ ROCKSDB_NAMESPACE::BackupEngine methods from the Java side. 
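For orientation, a minimal sketch of the C++ calls that this BackupEngine bridge forwards to. The paths, the pre-existing database, and the assert-based error handling are illustrative assumptions, not part of the patch:

    #include <cassert>

    #include "rocksdb/db.h"
    #include "rocksdb/utilities/backup_engine.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      rocksdb::DB* db;
      assert(rocksdb::DB::Open(options, "/tmp/rocksdb", &db).ok());

      // Mirrors Java_org_rocksdb_BackupEngine_open: Open() reports failure
      // through Status, which the bridge turns into a Java RocksDBException.
      rocksdb::BackupEngine* backup_engine;
      assert(rocksdb::BackupEngine::Open(
                 rocksdb::Env::Default(),
                 rocksdb::BackupEngineOptions("/tmp/rocksdb_backup"),
                 &backup_engine)
                 .ok());

      // Mirrors createNewBackup(db, jflush_before_backup).
      assert(backup_engine->CreateNewBackup(db, /*flush_before_backup=*/true).ok());

      delete backup_engine;  // what disposeInternal does from the Java side
      delete db;
      return 0;
    }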
+ +#include + +#include + +#include "include/org_rocksdb_BackupEngine.h" +#include "rocksdb/utilities/backup_engine.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_BackupEngine + * Method: open + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_BackupEngine_open(JNIEnv* env, jclass /*jcls*/, + jlong env_handle, + jlong backup_engine_options_handle) { + auto* rocks_env = reinterpret_cast(env_handle); + auto* backup_engine_options = + reinterpret_cast( + backup_engine_options_handle); + ROCKSDB_NAMESPACE::BackupEngine* backup_engine; + auto status = ROCKSDB_NAMESPACE::BackupEngine::Open( + rocks_env, *backup_engine_options, &backup_engine); + + if (status.ok()) { + return GET_CPLUSPLUS_POINTER(backup_engine); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + return 0; + } +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: createNewBackup + * Signature: (JJZ)V + */ +void Java_org_rocksdb_BackupEngine_createNewBackup( + JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jlong db_handle, + jboolean jflush_before_backup) { + auto* db = reinterpret_cast(db_handle); + auto* backup_engine = + reinterpret_cast(jbe_handle); + auto status = backup_engine->CreateNewBackup( + db, static_cast(jflush_before_backup)); + + if (status.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: createNewBackupWithMetadata + * Signature: (JJLjava/lang/String;Z)V + */ +void Java_org_rocksdb_BackupEngine_createNewBackupWithMetadata( + JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jlong db_handle, + jstring japp_metadata, jboolean jflush_before_backup) { + auto* db = reinterpret_cast(db_handle); + auto* backup_engine = + reinterpret_cast(jbe_handle); + + jboolean has_exception = JNI_FALSE; + std::string app_metadata = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, japp_metadata, &has_exception); + if (has_exception == JNI_TRUE) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, "Could not copy jstring to std::string"); + return; + } + + auto status = backup_engine->CreateNewBackupWithMetadata( + db, app_metadata, static_cast(jflush_before_backup)); + + if (status.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: getBackupInfo + * Signature: (J)Ljava/util/List; + */ +jobject Java_org_rocksdb_BackupEngine_getBackupInfo(JNIEnv* env, + jobject /*jbe*/, + jlong jbe_handle) { + auto* backup_engine = + reinterpret_cast(jbe_handle); + std::vector backup_infos; + backup_engine->GetBackupInfo(&backup_infos); + return ROCKSDB_NAMESPACE::BackupInfoListJni::getBackupInfo(env, backup_infos); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: getCorruptedBackups + * Signature: (J)[I + */ +jintArray Java_org_rocksdb_BackupEngine_getCorruptedBackups(JNIEnv* env, + jobject /*jbe*/, + jlong jbe_handle) { + auto* backup_engine = + reinterpret_cast(jbe_handle); + std::vector backup_ids; + backup_engine->GetCorruptedBackups(&backup_ids); + // store backupids in int array + std::vector int_backup_ids(backup_ids.begin(), backup_ids.end()); + + // Store ints in java array + // Its ok to loose precision here (64->32) + jsize ret_backup_ids_size = static_cast(backup_ids.size()); + jintArray ret_backup_ids = env->NewIntArray(ret_backup_ids_size); + if (ret_backup_ids == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + 
env->SetIntArrayRegion(ret_backup_ids, 0, ret_backup_ids_size, + int_backup_ids.data()); + return ret_backup_ids; +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: garbageCollect + * Signature: (J)V + */ +void Java_org_rocksdb_BackupEngine_garbageCollect(JNIEnv* env, jobject /*jbe*/, + jlong jbe_handle) { + auto* backup_engine = + reinterpret_cast(jbe_handle); + auto status = backup_engine->GarbageCollect(); + + if (status.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: purgeOldBackups + * Signature: (JI)V + */ +void Java_org_rocksdb_BackupEngine_purgeOldBackups(JNIEnv* env, jobject /*jbe*/, + jlong jbe_handle, + jint jnum_backups_to_keep) { + auto* backup_engine = + reinterpret_cast(jbe_handle); + auto status = backup_engine->PurgeOldBackups( + static_cast(jnum_backups_to_keep)); + + if (status.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: deleteBackup + * Signature: (JI)V + */ +void Java_org_rocksdb_BackupEngine_deleteBackup(JNIEnv* env, jobject /*jbe*/, + jlong jbe_handle, + jint jbackup_id) { + auto* backup_engine = + reinterpret_cast(jbe_handle); + auto status = backup_engine->DeleteBackup( + static_cast(jbackup_id)); + + if (status.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: restoreDbFromBackup + * Signature: (JILjava/lang/String;Ljava/lang/String;J)V + */ +void Java_org_rocksdb_BackupEngine_restoreDbFromBackup( + JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jint jbackup_id, + jstring jdb_dir, jstring jwal_dir, jlong jrestore_options_handle) { + auto* backup_engine = + reinterpret_cast(jbe_handle); + const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr); + if (db_dir == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr); + if (wal_dir == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_dir, db_dir); + return; + } + auto* restore_options = reinterpret_cast( + jrestore_options_handle); + auto status = backup_engine->RestoreDBFromBackup( + static_cast(jbackup_id), db_dir, wal_dir, + *restore_options); + + env->ReleaseStringUTFChars(jwal_dir, wal_dir); + env->ReleaseStringUTFChars(jdb_dir, db_dir); + + if (status.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: restoreDbFromLatestBackup + * Signature: (JLjava/lang/String;Ljava/lang/String;J)V + */ +void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup( + JNIEnv* env, jobject /*jbe*/, jlong jbe_handle, jstring jdb_dir, + jstring jwal_dir, jlong jrestore_options_handle) { + auto* backup_engine = + reinterpret_cast(jbe_handle); + const char* db_dir = env->GetStringUTFChars(jdb_dir, nullptr); + if (db_dir == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr); + if (wal_dir == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_dir, db_dir); + return; + } + auto* restore_options = reinterpret_cast( + jrestore_options_handle); + auto status = backup_engine->RestoreDBFromLatestBackup(db_dir, wal_dir, + *restore_options); + + env->ReleaseStringUTFChars(jwal_dir, wal_dir); + env->ReleaseStringUTFChars(jdb_dir, 
db_dir); + + if (status.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_BackupEngine_disposeInternal(JNIEnv* /*env*/, + jobject /*jbe*/, + jlong jbe_handle) { + auto* be = reinterpret_cast(jbe_handle); + assert(be != nullptr); + delete be; +} diff --git a/src/rocksdb/java/rocksjni/cache.cc b/src/rocksdb/java/rocksjni/cache.cc new file mode 100644 index 000000000..33c0a2f0b --- /dev/null +++ b/src/rocksdb/java/rocksjni/cache.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::Cache. + +#include "rocksdb/cache.h" + +#include + +#include "include/org_rocksdb_Cache.h" + +/* + * Class: org_rocksdb_Cache + * Method: getUsage + * Signature: (J)J + */ +jlong Java_org_rocksdb_Cache_getUsage(JNIEnv*, jclass, jlong jhandle) { + auto* sptr_cache = + reinterpret_cast*>(jhandle); + return static_cast(sptr_cache->get()->GetUsage()); +} + +/* + * Class: org_rocksdb_Cache + * Method: getPinnedUsage + * Signature: (J)J + */ +jlong Java_org_rocksdb_Cache_getPinnedUsage(JNIEnv*, jclass, jlong jhandle) { + auto* sptr_cache = + reinterpret_cast*>(jhandle); + return static_cast(sptr_cache->get()->GetPinnedUsage()); +} diff --git a/src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc b/src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc new file mode 100644 index 000000000..25817aeca --- /dev/null +++ b/src/rocksdb/java/rocksjni/cassandra_compactionfilterjni.cc @@ -0,0 +1,25 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#include + +#include "include/org_rocksdb_CassandraCompactionFilter.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "utilities/cassandra/cassandra_compaction_filter.h" + +/* + * Class: org_rocksdb_CassandraCompactionFilter + * Method: createNewCassandraCompactionFilter0 + * Signature: (ZI)J + */ +jlong Java_org_rocksdb_CassandraCompactionFilter_createNewCassandraCompactionFilter0( + JNIEnv* /*env*/, jclass /*jcls*/, jboolean purge_ttl_on_expiration, + jint gc_grace_period_in_seconds) { + auto* compaction_filter = + new ROCKSDB_NAMESPACE::cassandra::CassandraCompactionFilter( + purge_ttl_on_expiration, gc_grace_period_in_seconds); + // set the native handle to our native compaction filter + return GET_CPLUSPLUS_POINTER(compaction_filter); +} diff --git a/src/rocksdb/java/rocksjni/cassandra_value_operator.cc b/src/rocksdb/java/rocksjni/cassandra_value_operator.cc new file mode 100644 index 000000000..6de28c1b1 --- /dev/null +++ b/src/rocksdb/java/rocksjni/cassandra_value_operator.cc @@ -0,0 +1,50 @@ +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
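The Cache bridge above illustrates the second handle convention used throughout rocksjni: for shared objects the jlong is a pointer to a heap-allocated std::shared_ptr, so disposal from Java only drops one reference instead of destroying an object the DB may still use. A reduced sketch of that pattern, with a toy Cache type standing in for ROCKSDB_NAMESPACE::Cache:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <memory>

    struct Cache {
      size_t GetUsage() const { return 42; }
    };

    // constructor side: heap-allocate a shared_ptr, hand its address to Java.
    int64_t NewCacheHandle() {
      return reinterpret_cast<int64_t>(new std::shared_ptr<Cache>(new Cache()));
    }

    // accessor side: cast the handle back, go through the inner pointer.
    size_t GetUsage(int64_t jhandle) {
      auto* sptr_cache = reinterpret_cast<std::shared_ptr<Cache>*>(jhandle);
      return (*sptr_cache)->GetUsage();
    }

    // disposeInternal side: delete only the shared_ptr; the Cache itself dies
    // when the last co-owner (e.g. table options that also hold it) lets go.
    void DisposeCacheHandle(int64_t jhandle) {
      delete reinterpret_cast<std::shared_ptr<Cache>*>(jhandle);
    }

    int main() {
      const int64_t handle = NewCacheHandle();
      assert(GetUsage(handle) == 42);
      DisposeCacheHandle(handle);
      return 0;
    }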
+ +#include +#include +#include + +#include +#include + +#include "include/org_rocksdb_CassandraValueMergeOperator.h" +#include "rocksdb/db.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/options.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/statistics.h" +#include "rocksdb/table.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" +#include "utilities/cassandra/merge_operator.h" + +/* + * Class: org_rocksdb_CassandraValueMergeOperator + * Method: newSharedCassandraValueMergeOperator + * Signature: (II)J + */ +jlong Java_org_rocksdb_CassandraValueMergeOperator_newSharedCassandraValueMergeOperator( + JNIEnv* /*env*/, jclass /*jclazz*/, jint gcGracePeriodInSeconds, + jint operands_limit) { + auto* op = new std::shared_ptr( + new ROCKSDB_NAMESPACE::cassandra::CassandraValueMergeOperator( + gcGracePeriodInSeconds, operands_limit)); + return GET_CPLUSPLUS_POINTER(op); +} + +/* + * Class: org_rocksdb_CassandraValueMergeOperator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_CassandraValueMergeOperator_disposeInternal( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* op = + reinterpret_cast*>( + jhandle); + delete op; +} diff --git a/src/rocksdb/java/rocksjni/checkpoint.cc b/src/rocksdb/java/rocksjni/checkpoint.cc new file mode 100644 index 000000000..d7cfd813b --- /dev/null +++ b/src/rocksdb/java/rocksjni/checkpoint.cc @@ -0,0 +1,71 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::Checkpoint methods from Java side. 
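A usage sketch for the Checkpoint API wrapped below (directory names are assumptions). Note that newCheckpoint in the bridge ignores the Status returned by Checkpoint::Create, so a failure there would only surface later; the sketch checks it explicitly:

    #include <cassert>

    #include "rocksdb/db.h"
    #include "rocksdb/utilities/checkpoint.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      rocksdb::DB* db;
      assert(rocksdb::DB::Open(options, "/tmp/rocksdb", &db).ok());

      rocksdb::Checkpoint* checkpoint;
      assert(rocksdb::Checkpoint::Create(db, &checkpoint).ok());
      // The target directory must not already exist; CreateCheckpoint creates
      // it, hard-linking SST files into it where the filesystem allows.
      assert(checkpoint->CreateCheckpoint("/tmp/rocksdb_checkpoint").ok());

      delete checkpoint;
      delete db;
      return 0;
    }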
+ +#include "rocksdb/utilities/checkpoint.h" + +#include +#include +#include + +#include + +#include "include/org_rocksdb_Checkpoint.h" +#include "rocksdb/db.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" +/* + * Class: org_rocksdb_Checkpoint + * Method: newCheckpoint + * Signature: (J)J + */ +jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* /*env*/, + jclass /*jclazz*/, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::Checkpoint* checkpoint; + ROCKSDB_NAMESPACE::Checkpoint::Create(db, &checkpoint); + return GET_CPLUSPLUS_POINTER(checkpoint); +} + +/* + * Class: org_rocksdb_Checkpoint + * Method: dispose + * Signature: (J)V + */ +void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* checkpoint = reinterpret_cast(jhandle); + assert(checkpoint != nullptr); + delete checkpoint; +} + +/* + * Class: org_rocksdb_Checkpoint + * Method: createCheckpoint + * Signature: (JLjava/lang/String;)V + */ +void Java_org_rocksdb_Checkpoint_createCheckpoint(JNIEnv* env, jobject /*jobj*/, + jlong jcheckpoint_handle, + jstring jcheckpoint_path) { + const char* checkpoint_path = env->GetStringUTFChars(jcheckpoint_path, 0); + if (checkpoint_path == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + auto* checkpoint = + reinterpret_cast(jcheckpoint_handle); + ROCKSDB_NAMESPACE::Status s = checkpoint->CreateCheckpoint(checkpoint_path); + + env->ReleaseStringUTFChars(jcheckpoint_path, checkpoint_path); + + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} diff --git a/src/rocksdb/java/rocksjni/clock_cache.cc b/src/rocksdb/java/rocksjni/clock_cache.cc new file mode 100644 index 000000000..e04991aa9 --- /dev/null +++ b/src/rocksdb/java/rocksjni/clock_cache.cc @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::ClockCache. + +#include "cache/clock_cache.h" + +#include + +#include "include/org_rocksdb_ClockCache.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_ClockCache + * Method: newClockCache + * Signature: (JIZ)J + */ +jlong Java_org_rocksdb_ClockCache_newClockCache( + JNIEnv* /*env*/, jclass /*jcls*/, jlong jcapacity, jint jnum_shard_bits, + jboolean jstrict_capacity_limit) { + auto* sptr_clock_cache = new std::shared_ptr( + ROCKSDB_NAMESPACE::NewClockCache( + static_cast(jcapacity), static_cast(jnum_shard_bits), + static_cast(jstrict_capacity_limit))); + return GET_CPLUSPLUS_POINTER(sptr_clock_cache); +} + +/* + * Class: org_rocksdb_ClockCache + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ClockCache_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* sptr_clock_cache = + reinterpret_cast*>(jhandle); + delete sptr_clock_cache; // delete std::shared_ptr +} diff --git a/src/rocksdb/java/rocksjni/columnfamilyhandle.cc b/src/rocksdb/java/rocksjni/columnfamilyhandle.cc new file mode 100644 index 000000000..4140580f0 --- /dev/null +++ b/src/rocksdb/java/rocksjni/columnfamilyhandle.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::ColumnFamilyHandle. + +#include +#include +#include + +#include "include/org_rocksdb_ColumnFamilyHandle.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_ColumnFamilyHandle + * Method: getName + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_ColumnFamilyHandle_getName(JNIEnv* env, + jobject /*jobj*/, + jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); + std::string cf_name = cfh->GetName(); + return ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, cf_name); +} + +/* + * Class: org_rocksdb_ColumnFamilyHandle + * Method: getID + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyHandle_getID(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); + const int32_t id = cfh->GetID(); + return static_cast(id); +} + +/* + * Class: org_rocksdb_ColumnFamilyHandle + * Method: getDescriptor + * Signature: (J)Lorg/rocksdb/ColumnFamilyDescriptor; + */ +jobject Java_org_rocksdb_ColumnFamilyHandle_getDescriptor(JNIEnv* env, + jobject /*jobj*/, + jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); + ROCKSDB_NAMESPACE::ColumnFamilyDescriptor desc; + ROCKSDB_NAMESPACE::Status s = cfh->GetDescriptor(&desc); + if (s.ok()) { + return ROCKSDB_NAMESPACE::ColumnFamilyDescriptorJni::construct(env, &desc); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } +} + +/* + * Class: org_rocksdb_ColumnFamilyHandle + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* cfh = reinterpret_cast(jhandle); + assert(cfh != nullptr); + delete cfh; +} diff --git a/src/rocksdb/java/rocksjni/compact_range_options.cc b/src/rocksdb/java/rocksjni/compact_range_options.cc new file mode 100644 index 000000000..77fbb8890 --- /dev/null +++ b/src/rocksdb/java/rocksjni/compact_range_options.cc @@ -0,0 +1,222 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactRangeOptions. 
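Before the individual setters, a sketch of how the CompactRangeOptions wrapped below are consumed on the C++ side; the database path and the particular option values are illustrative:

    #include <cassert>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      rocksdb::DB* db;
      assert(rocksdb::DB::Open(options, "/tmp/rocksdb", &db).ok());

      rocksdb::CompactRangeOptions cro;
      cro.change_level = true;  // move the compaction output down...
      cro.target_level = 1;     // ...to L1
      cro.max_subcompactions = 4;
      cro.bottommost_level_compaction =
          rocksdb::BottommostLevelCompaction::kForce;

      // nullptr begin/end compacts the entire key range.
      assert(db->CompactRange(cro, nullptr, nullptr).ok());

      delete db;
      return 0;
    }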
+ +#include + +#include "include/org_rocksdb_CompactRangeOptions.h" +#include "rocksdb/options.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: newCompactRangeOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_CompactRangeOptions_newCompactRangeOptions( + JNIEnv* /*env*/, jclass /*jclazz*/) { + auto* options = new ROCKSDB_NAMESPACE::CompactRangeOptions(); + return GET_CPLUSPLUS_POINTER(options); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: exclusiveManualCompaction + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_CompactRangeOptions_exclusiveManualCompaction( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->exclusive_manual_compaction); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: setExclusiveManualCompaction + * Signature: (JZ)V + */ +void Java_org_rocksdb_CompactRangeOptions_setExclusiveManualCompaction( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jboolean exclusive_manual_compaction) { + auto* options = + reinterpret_cast(jhandle); + options->exclusive_manual_compaction = + static_cast(exclusive_manual_compaction); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: bottommostLevelCompaction + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactRangeOptions_bottommostLevelCompaction( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::BottommostLevelCompactionJni:: + toJavaBottommostLevelCompaction(options->bottommost_level_compaction); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: setBottommostLevelCompaction + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactRangeOptions_setBottommostLevelCompaction( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jint bottommost_level_compaction) { + auto* options = + reinterpret_cast(jhandle); + options->bottommost_level_compaction = + ROCKSDB_NAMESPACE::BottommostLevelCompactionJni:: + toCppBottommostLevelCompaction(bottommost_level_compaction); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: changeLevel + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_CompactRangeOptions_changeLevel(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->change_level); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: setChangeLevel + * Signature: (JZ)V + */ +void Java_org_rocksdb_CompactRangeOptions_setChangeLevel( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean change_level) { + auto* options = + reinterpret_cast(jhandle); + options->change_level = static_cast(change_level); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: targetLevel + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactRangeOptions_targetLevel(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->target_level); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: setTargetLevel + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactRangeOptions_setTargetLevel(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jint target_level) { + auto* options = + reinterpret_cast(jhandle); + options->target_level = static_cast(target_level); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: targetPathId + * Signature: 
(J)I + */ +jint Java_org_rocksdb_CompactRangeOptions_targetPathId(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->target_path_id); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: setTargetPathId + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactRangeOptions_setTargetPathId(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jint target_path_id) { + auto* options = + reinterpret_cast(jhandle); + options->target_path_id = static_cast(target_path_id); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: allowWriteStall + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_CompactRangeOptions_allowWriteStall(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->allow_write_stall); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: setAllowWriteStall + * Signature: (JZ)V + */ +void Java_org_rocksdb_CompactRangeOptions_setAllowWriteStall( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jboolean allow_write_stall) { + auto* options = + reinterpret_cast(jhandle); + options->allow_write_stall = static_cast(allow_write_stall); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: maxSubcompactions + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactRangeOptions_maxSubcompactions(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->max_subcompactions); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: setMaxSubcompactions + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactRangeOptions_setMaxSubcompactions( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jint max_subcompactions) { + auto* options = + reinterpret_cast(jhandle); + options->max_subcompactions = static_cast(max_subcompactions); +} + +/* + * Class: org_rocksdb_CompactRangeOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_CompactRangeOptions_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + delete options; +} diff --git a/src/rocksdb/java/rocksjni/compaction_filter.cc b/src/rocksdb/java/rocksjni/compaction_filter.cc new file mode 100644 index 000000000..ea04996ac --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_filter.cc @@ -0,0 +1,29 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionFilter. 
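A minimal example of the CompactionFilter interface whose disposal the bridge below manages; this subclass is hypothetical (the real Java-backed filters live in the compaction filter JNI callback files):

    #include <string>

    #include "rocksdb/compaction_filter.h"
    #include "rocksdb/slice.h"

    // Hypothetical filter: drop every entry whose value is empty, keep the rest.
    class DropEmptyValueFilter : public rocksdb::CompactionFilter {
     public:
      bool Filter(int /*level*/, const rocksdb::Slice& /*key*/,
                  const rocksdb::Slice& existing_value, std::string* /*new_value*/,
                  bool* /*value_changed*/) const override {
        return existing_value.empty();  // true means "remove this key-value pair"
      }

      const char* Name() const override { return "DropEmptyValueFilter"; }
    };

Such a filter is typically assigned to ColumnFamilyOptions::compaction_filter, which does not take ownership; that is why the disposeInternal below deletes the native object explicitly when the Java wrapper is closed.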
+ +#include "rocksdb/compaction_filter.h" + +#include + +#include "include/org_rocksdb_AbstractCompactionFilter.h" + +// + +/* + * Class: org_rocksdb_AbstractCompactionFilter + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_AbstractCompactionFilter_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + auto* cf = reinterpret_cast(handle); + assert(cf != nullptr); + delete cf; +} +// diff --git a/src/rocksdb/java/rocksjni/compaction_filter_factory.cc b/src/rocksdb/java/rocksjni/compaction_filter_factory.cc new file mode 100644 index 000000000..16fbdbbdd --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_filter_factory.cc @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionFilterFactory. + +#include + +#include + +#include "include/org_rocksdb_AbstractCompactionFilterFactory.h" +#include "rocksjni/compaction_filter_factory_jnicallback.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_AbstractCompactionFilterFactory + * Method: createNewCompactionFilterFactory0 + * Signature: ()J + */ +jlong Java_org_rocksdb_AbstractCompactionFilterFactory_createNewCompactionFilterFactory0( + JNIEnv* env, jobject jobj) { + auto* cff = + new ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback(env, jobj); + auto* ptr_sptr_cff = new std::shared_ptr< + ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback>(cff); + return GET_CPLUSPLUS_POINTER(ptr_sptr_cff); +} + +/* + * Class: org_rocksdb_AbstractCompactionFilterFactory + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_AbstractCompactionFilterFactory_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { + auto* ptr_sptr_cff = reinterpret_cast< + std::shared_ptr*>( + jhandle); + delete ptr_sptr_cff; +} diff --git a/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc new file mode 100644 index 000000000..14285526f --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.cc @@ -0,0 +1,79 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionFilterFactory. 
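For contrast with the JNI callback that follows, a plain C++ CompactionFilterFactory looks like this sketch (reusing the hypothetical DropEmptyValueFilter from the previous example):

    #include <memory>

    #include "rocksdb/compaction_filter.h"

    class DropEmptyValueFilterFactory : public rocksdb::CompactionFilterFactory {
     public:
      std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
          const rocksdb::CompactionFilter::Context& context) override {
        // One fresh filter per compaction; context.is_full_compaction and
        // context.is_manual_compaction are the two flags the JNI callback
        // below forwards to the Java factory.
        (void)context;
        return std::unique_ptr<rocksdb::CompactionFilter>(
            new DropEmptyValueFilter());
      }

      const char* Name() const override { return "DropEmptyValueFilterFactory"; }
    };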
+ +#include "rocksjni/compaction_filter_factory_jnicallback.h" + +#include "rocksjni/portal.h" + +namespace ROCKSDB_NAMESPACE { +CompactionFilterFactoryJniCallback::CompactionFilterFactoryJniCallback( + JNIEnv* env, jobject jcompaction_filter_factory) + : JniCallback(env, jcompaction_filter_factory) { + // Note: The name of a CompactionFilterFactory will not change during + // it's lifetime, so we cache it in a global var + jmethodID jname_method_id = + AbstractCompactionFilterFactoryJni::getNameMethodId(env); + if (jname_method_id == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + jstring jname = + (jstring)env->CallObjectMethod(m_jcallback_obj, jname_method_id); + if (env->ExceptionCheck()) { + // exception thrown + return; + } + jboolean has_exception = JNI_FALSE; + m_name = + JniUtil::copyString(env, jname, &has_exception); // also releases jname + if (has_exception == JNI_TRUE) { + // exception thrown + return; + } + + m_jcreate_compaction_filter_methodid = + AbstractCompactionFilterFactoryJni::getCreateCompactionFilterMethodId( + env); + if (m_jcreate_compaction_filter_methodid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } +} + +const char* CompactionFilterFactoryJniCallback::Name() const { + return m_name.get(); +} + +std::unique_ptr +CompactionFilterFactoryJniCallback::CreateCompactionFilter( + const CompactionFilter::Context& context) { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + jlong addr_compaction_filter = + env->CallLongMethod(m_jcallback_obj, m_jcreate_compaction_filter_methodid, + static_cast(context.is_full_compaction), + static_cast(context.is_manual_compaction)); + + if (env->ExceptionCheck()) { + // exception thrown from CallLongMethod + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return nullptr; + } + + auto* cff = reinterpret_cast(addr_compaction_filter); + + releaseJniEnv(attached_thread); + + return std::unique_ptr(cff); +} + +} // namespace ROCKSDB_NAMESPACE diff --git a/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h new file mode 100644 index 000000000..2f26f8dbe --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_filter_factory_jnicallback.h @@ -0,0 +1,37 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionFilterFactory. 
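One detail of the callback declared below is worth spelling out: Name() must return a pointer that stays valid for the factory's whole lifetime, which is why the constructor copies the Java-provided name into an owned buffer exactly once (the m_name member). A standalone sketch of that caching pattern, with plain std::string input standing in for the JNI string handling:

    #include <cassert>
    #include <cstring>
    #include <memory>
    #include <string>

    // Name() hands back a raw pointer, so the name is copied once at
    // construction time instead of being re-fetched (and re-freed) per call.
    class NamedCallback {
     public:
      explicit NamedCallback(const std::string& java_side_name)
          : m_name(new char[java_side_name.size() + 1]) {
        std::memcpy(m_name.get(), java_side_name.c_str(),
                    java_side_name.size() + 1);
      }
      const char* Name() const { return m_name.get(); }

     private:
      std::unique_ptr<char[]> m_name;
    };

    int main() {
      const NamedCallback cb("example.CompactionFilterFactory");
      assert(std::strcmp(cb.Name(), "example.CompactionFilterFactory") == 0);
      return 0;
    }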
+
+#ifndef JAVA_ROCKSJNI_COMPACTION_FILTER_FACTORY_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_COMPACTION_FILTER_FACTORY_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include <memory>
+
+#include "rocksdb/compaction_filter.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class CompactionFilterFactoryJniCallback : public JniCallback,
+                                           public CompactionFilterFactory {
+ public:
+  CompactionFilterFactoryJniCallback(JNIEnv* env,
+                                     jobject jcompaction_filter_factory);
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context);
+  virtual const char* Name() const;
+
+ private:
+  std::unique_ptr<const char[]> m_name;
+  jmethodID m_jcreate_compaction_filter_methodid;
+};
+
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif  // JAVA_ROCKSJNI_COMPACTION_FILTER_FACTORY_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/compaction_job_info.cc b/src/rocksdb/java/rocksjni/compaction_job_info.cc
new file mode 100644
index 000000000..fb292f59c
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compaction_job_info.cc
@@ -0,0 +1,230 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::CompactionJobInfo.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_CompactionJobInfo.h"
+#include "rocksdb/listener.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class:     org_rocksdb_CompactionJobInfo
+ * Method:    newCompactionJobInfo
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_newCompactionJobInfo(JNIEnv*, jclass) {
+  auto* compact_job_info = new ROCKSDB_NAMESPACE::CompactionJobInfo();
+  return GET_CPLUSPLUS_POINTER(compact_job_info);
+}
+
+/*
+ * Class:     org_rocksdb_CompactionJobInfo
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompactionJobInfo_disposeInternal(JNIEnv*, jobject,
+                                                        jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+  delete compact_job_info;
+}
+
+/*
+ * Class:     org_rocksdb_CompactionJobInfo
+ * Method:    columnFamilyName
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_CompactionJobInfo_columnFamilyName(JNIEnv* env,
+                                                               jclass,
+                                                               jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+  return ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, compact_job_info->cf_name);
+}
+
+/*
+ * Class:     org_rocksdb_CompactionJobInfo
+ * Method:    status
+ * Signature: (J)Lorg/rocksdb/Status;
+ */
+jobject Java_org_rocksdb_CompactionJobInfo_status(JNIEnv* env, jclass,
+                                                  jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+  return ROCKSDB_NAMESPACE::StatusJni::construct(env, compact_job_info->status);
+}
+
+/*
+ * Class:     org_rocksdb_CompactionJobInfo
+ * Method:    threadId
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompactionJobInfo_threadId(JNIEnv*, jclass,
+                                                  jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+  return static_cast<jlong>(compact_job_info->thread_id);
+}
+
+/*
+ * Class:     org_rocksdb_CompactionJobInfo
+ * Method:    jobId
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_CompactionJobInfo_jobId(JNIEnv*, jclass, jlong jhandle) {
+  auto* compact_job_info =
+      reinterpret_cast<ROCKSDB_NAMESPACE::CompactionJobInfo*>(jhandle);
+  return static_cast<jint>(compact_job_info->job_id);
+}
+
+/*
+ * Class:     org_rocksdb_CompactionJobInfo
+ * Method:    baseInputLevel
+ * Signature: (J)I
+ */
+jint
Java_org_rocksdb_CompactionJobInfo_baseInputLevel(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_info = + reinterpret_cast(jhandle); + return static_cast(compact_job_info->base_input_level); +} + +/* + * Class: org_rocksdb_CompactionJobInfo + * Method: outputLevel + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactionJobInfo_outputLevel(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_info = + reinterpret_cast(jhandle); + return static_cast(compact_job_info->output_level); +} + +/* + * Class: org_rocksdb_CompactionJobInfo + * Method: inputFiles + * Signature: (J)[Ljava/lang/String; + */ +jobjectArray Java_org_rocksdb_CompactionJobInfo_inputFiles(JNIEnv* env, jclass, + jlong jhandle) { + auto* compact_job_info = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings( + env, &compact_job_info->input_files); +} + +/* + * Class: org_rocksdb_CompactionJobInfo + * Method: outputFiles + * Signature: (J)[Ljava/lang/String; + */ +jobjectArray Java_org_rocksdb_CompactionJobInfo_outputFiles(JNIEnv* env, jclass, + jlong jhandle) { + auto* compact_job_info = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings( + env, &compact_job_info->output_files); +} + +/* + * Class: org_rocksdb_CompactionJobInfo + * Method: tableProperties + * Signature: (J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_CompactionJobInfo_tableProperties(JNIEnv* env, jclass, + jlong jhandle) { + auto* compact_job_info = + reinterpret_cast(jhandle); + auto* map = &compact_job_info->table_properties; + + jobject jhash_map = ROCKSDB_NAMESPACE::HashMapJni::construct( + env, static_cast(map->size())); + if (jhash_map == nullptr) { + // exception occurred + return nullptr; + } + + const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV< + const std::string, + std::shared_ptr, jobject, + jobject> + fn_map_kv = + [env](const std::pair< + const std::string, + std::shared_ptr>& + kv) { + jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &(kv.first), false); + if (env->ExceptionCheck()) { + // an error occurred + return std::unique_ptr>(nullptr); + } + + jobject jtable_properties = + ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties( + env, *(kv.second.get())); + if (env->ExceptionCheck()) { + // an error occurred + env->DeleteLocalRef(jkey); + return std::unique_ptr>(nullptr); + } + + return std::unique_ptr>( + new std::pair(static_cast(jkey), + jtable_properties)); + }; + + if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(env, jhash_map, map->begin(), + map->end(), fn_map_kv)) { + // exception occurred + return nullptr; + } + + return jhash_map; +} + +/* + * Class: org_rocksdb_CompactionJobInfo + * Method: compactionReason + * Signature: (J)B + */ +jbyte Java_org_rocksdb_CompactionJobInfo_compactionReason(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_info = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompactionReasonJni::toJavaCompactionReason( + compact_job_info->compaction_reason); +} + +/* + * Class: org_rocksdb_CompactionJobInfo + * Method: compression + * Signature: (J)B + */ +jbyte Java_org_rocksdb_CompactionJobInfo_compression(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_info = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType( + compact_job_info->compression); +} + +/* + * Class: org_rocksdb_CompactionJobInfo + * Method: stats + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobInfo_stats(JNIEnv*, jclass, jlong jhandle) { + auto* compact_job_info = + 
reinterpret_cast(jhandle); + auto* stats = new ROCKSDB_NAMESPACE::CompactionJobStats(); + stats->Add(compact_job_info->stats); + return GET_CPLUSPLUS_POINTER(stats); +} diff --git a/src/rocksdb/java/rocksjni/compaction_job_stats.cc b/src/rocksdb/java/rocksjni/compaction_job_stats.cc new file mode 100644 index 000000000..a2599c132 --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_job_stats.cc @@ -0,0 +1,345 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionJobStats. + +#include "rocksdb/compaction_job_stats.h" + +#include + +#include "include/org_rocksdb_CompactionJobStats.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: newCompactionJobStats + * Signature: ()J + */ +jlong Java_org_rocksdb_CompactionJobStats_newCompactionJobStats(JNIEnv*, + jclass) { + auto* compact_job_stats = new ROCKSDB_NAMESPACE::CompactionJobStats(); + return GET_CPLUSPLUS_POINTER(compact_job_stats); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_CompactionJobStats_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + delete compact_job_stats; +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: reset + * Signature: (J)V + */ +void Java_org_rocksdb_CompactionJobStats_reset(JNIEnv*, jclass, jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + compact_job_stats->Reset(); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: add + * Signature: (JJ)V + */ +void Java_org_rocksdb_CompactionJobStats_add(JNIEnv*, jclass, jlong jhandle, + jlong jother_handle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + auto* other_compact_job_stats = + reinterpret_cast(jother_handle); + compact_job_stats->Add(*other_compact_job_stats); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: elapsedMicros + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_elapsedMicros(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->elapsed_micros); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numInputRecords + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numInputRecords(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_input_records); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numInputFiles + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numInputFiles(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_input_files); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numInputFilesAtOutputLevel + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numInputFilesAtOutputLevel( + JNIEnv*, jclass, jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_input_files_at_output_level); +} + +/* + * Class: 
org_rocksdb_CompactionJobStats + * Method: numOutputRecords + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numOutputRecords(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_output_records); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numOutputFiles + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numOutputFiles(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_output_files); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: isManualCompaction + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_CompactionJobStats_isManualCompaction(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + if (compact_job_stats->is_manual_compaction) { + return JNI_TRUE; + } else { + return JNI_FALSE; + } +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: totalInputBytes + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_totalInputBytes(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->total_input_bytes); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: totalOutputBytes + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_totalOutputBytes(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->total_output_bytes); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numRecordsReplaced + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numRecordsReplaced(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_records_replaced); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: totalInputRawKeyBytes + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_totalInputRawKeyBytes(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->total_input_raw_key_bytes); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: totalInputRawValueBytes + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_totalInputRawValueBytes( + JNIEnv*, jclass, jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->total_input_raw_value_bytes); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numInputDeletionRecords + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numInputDeletionRecords( + JNIEnv*, jclass, jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_input_deletion_records); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numExpiredDeletionRecords + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numExpiredDeletionRecords( + JNIEnv*, jclass, jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_expired_deletion_records); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numCorruptKeys + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numCorruptKeys(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + 
reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_corrupt_keys); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: fileWriteNanos + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_fileWriteNanos(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->file_write_nanos); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: fileRangeSyncNanos + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_fileRangeSyncNanos(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->file_range_sync_nanos); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: fileFsyncNanos + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_fileFsyncNanos(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->file_fsync_nanos); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: filePrepareWriteNanos + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_filePrepareWriteNanos(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->file_prepare_write_nanos); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: smallestOutputKeyPrefix + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_CompactionJobStats_smallestOutputKeyPrefix( + JNIEnv* env, jclass, jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, compact_job_stats->smallest_output_key_prefix); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: largestOutputKeyPrefix + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_CompactionJobStats_largestOutputKeyPrefix( + JNIEnv* env, jclass, jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, compact_job_stats->largest_output_key_prefix); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numSingleDelFallthru + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numSingleDelFallthru(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_single_del_fallthru); +} + +/* + * Class: org_rocksdb_CompactionJobStats + * Method: numSingleDelMismatch + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionJobStats_numSingleDelMismatch(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_job_stats = + reinterpret_cast(jhandle); + return static_cast(compact_job_stats->num_single_del_mismatch); +} diff --git a/src/rocksdb/java/rocksjni/compaction_options.cc b/src/rocksdb/java/rocksjni/compaction_options.cc new file mode 100644 index 000000000..bbbde0313 --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_options.cc @@ -0,0 +1,112 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionOptions. 
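A minimal sketch of how the C++ CompactionOptions wrapped by this file is typically consumed: it is the struct accepted by DB::CompactFiles(). The values below are illustrative only, and db/input_files are assumed to exist:

    #include <string>
    #include <vector>
    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    rocksdb::Status compact_files_example(
        rocksdb::DB* db, const std::vector<std::string>& input_files) {
      rocksdb::CompactionOptions copts;
      copts.compression = rocksdb::kZSTD;       // cf. setCompression below
      copts.output_file_size_limit = 64 << 20;  // cf. setOutputFileSizeLimit
      copts.max_subcompactions = 4;             // cf. setMaxSubcompactions
      return db->CompactFiles(copts, input_files, /*output_level=*/1);
    }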
+ +#include + +#include "include/org_rocksdb_CompactionOptions.h" +#include "rocksdb/options.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_CompactionOptions + * Method: newCompactionOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_CompactionOptions_newCompactionOptions(JNIEnv*, jclass) { + auto* compact_opts = new ROCKSDB_NAMESPACE::CompactionOptions(); + return GET_CPLUSPLUS_POINTER(compact_opts); +} + +/* + * Class: org_rocksdb_CompactionOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_CompactionOptions_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* compact_opts = + reinterpret_cast(jhandle); + delete compact_opts; +} + +/* + * Class: org_rocksdb_CompactionOptions + * Method: compression + * Signature: (J)B + */ +jbyte Java_org_rocksdb_CompactionOptions_compression(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_opts = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType( + compact_opts->compression); +} + +/* + * Class: org_rocksdb_CompactionOptions + * Method: setCompression + * Signature: (JB)V + */ +void Java_org_rocksdb_CompactionOptions_setCompression( + JNIEnv*, jclass, jlong jhandle, jbyte jcompression_type_value) { + auto* compact_opts = + reinterpret_cast(jhandle); + compact_opts->compression = + ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType( + jcompression_type_value); +} + +/* + * Class: org_rocksdb_CompactionOptions + * Method: outputFileSizeLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionOptions_outputFileSizeLimit(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_opts = + reinterpret_cast(jhandle); + return static_cast(compact_opts->output_file_size_limit); +} + +/* + * Class: org_rocksdb_CompactionOptions + * Method: setOutputFileSizeLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_CompactionOptions_setOutputFileSizeLimit( + JNIEnv*, jclass, jlong jhandle, jlong joutput_file_size_limit) { + auto* compact_opts = + reinterpret_cast(jhandle); + compact_opts->output_file_size_limit = + static_cast(joutput_file_size_limit); +} + +/* + * Class: org_rocksdb_CompactionOptions + * Method: maxSubcompactions + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactionOptions_maxSubcompactions(JNIEnv*, jclass, + jlong jhandle) { + auto* compact_opts = + reinterpret_cast(jhandle); + return static_cast(compact_opts->max_subcompactions); +} + +/* + * Class: org_rocksdb_CompactionOptions + * Method: setMaxSubcompactions + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactionOptions_setMaxSubcompactions( + JNIEnv*, jclass, jlong jhandle, jint jmax_subcompactions) { + auto* compact_opts = + reinterpret_cast(jhandle); + compact_opts->max_subcompactions = static_cast(jmax_subcompactions); +} diff --git a/src/rocksdb/java/rocksjni/compaction_options_fifo.cc b/src/rocksdb/java/rocksjni/compaction_options_fifo.cc new file mode 100644 index 000000000..f6a47fec5 --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_options_fifo.cc @@ -0,0 +1,83 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionOptionsFIFO. 
+ +#include + +#include "include/org_rocksdb_CompactionOptionsFIFO.h" +#include "rocksdb/advanced_options.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_CompactionOptionsFIFO + * Method: newCompactionOptionsFIFO + * Signature: ()J + */ +jlong Java_org_rocksdb_CompactionOptionsFIFO_newCompactionOptionsFIFO(JNIEnv*, + jclass) { + const auto* opt = new ROCKSDB_NAMESPACE::CompactionOptionsFIFO(); + return GET_CPLUSPLUS_POINTER(opt); +} + +/* + * Class: org_rocksdb_CompactionOptionsFIFO + * Method: setMaxTableFilesSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_CompactionOptionsFIFO_setMaxTableFilesSize( + JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) { + auto* opt = + reinterpret_cast(jhandle); + opt->max_table_files_size = static_cast(jmax_table_files_size); +} + +/* + * Class: org_rocksdb_CompactionOptionsFIFO + * Method: maxTableFilesSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_CompactionOptionsFIFO_maxTableFilesSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->max_table_files_size); +} + +/* + * Class: org_rocksdb_CompactionOptionsFIFO + * Method: setAllowCompaction + * Signature: (JZ)V + */ +void Java_org_rocksdb_CompactionOptionsFIFO_setAllowCompaction( + JNIEnv*, jobject, jlong jhandle, jboolean allow_compaction) { + auto* opt = + reinterpret_cast(jhandle); + opt->allow_compaction = static_cast(allow_compaction); +} + +/* + * Class: org_rocksdb_CompactionOptionsFIFO + * Method: allowCompaction + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_CompactionOptionsFIFO_allowCompaction(JNIEnv*, + jobject, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->allow_compaction); +} + +/* + * Class: org_rocksdb_CompactionOptionsFIFO + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_CompactionOptionsFIFO_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/src/rocksdb/java/rocksjni/compaction_options_universal.cc b/src/rocksdb/java/rocksjni/compaction_options_universal.cc new file mode 100644 index 000000000..9fc6f3158 --- /dev/null +++ b/src/rocksdb/java/rocksjni/compaction_options_universal.cc @@ -0,0 +1,209 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompactionOptionsUniversal. 
+ +#include + +#include "include/org_rocksdb_CompactionOptionsUniversal.h" +#include "rocksdb/advanced_options.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: newCompactionOptionsUniversal + * Signature: ()J + */ +jlong Java_org_rocksdb_CompactionOptionsUniversal_newCompactionOptionsUniversal( + JNIEnv*, jclass) { + const auto* opt = new ROCKSDB_NAMESPACE::CompactionOptionsUniversal(); + return GET_CPLUSPLUS_POINTER(opt); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: setSizeRatio + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_setSizeRatio( + JNIEnv*, jobject, jlong jhandle, jint jsize_ratio) { + auto* opt = + reinterpret_cast(jhandle); + opt->size_ratio = static_cast(jsize_ratio); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: sizeRatio + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactionOptionsUniversal_sizeRatio(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->size_ratio); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: setMinMergeWidth + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_setMinMergeWidth( + JNIEnv*, jobject, jlong jhandle, jint jmin_merge_width) { + auto* opt = + reinterpret_cast(jhandle); + opt->min_merge_width = static_cast(jmin_merge_width); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: minMergeWidth + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactionOptionsUniversal_minMergeWidth(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->min_merge_width); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: setMaxMergeWidth + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_setMaxMergeWidth( + JNIEnv*, jobject, jlong jhandle, jint jmax_merge_width) { + auto* opt = + reinterpret_cast(jhandle); + opt->max_merge_width = static_cast(jmax_merge_width); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: maxMergeWidth + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactionOptionsUniversal_maxMergeWidth(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->max_merge_width); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: setMaxSizeAmplificationPercent + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_setMaxSizeAmplificationPercent( + JNIEnv*, jobject, jlong jhandle, jint jmax_size_amplification_percent) { + auto* opt = + reinterpret_cast(jhandle); + opt->max_size_amplification_percent = + static_cast(jmax_size_amplification_percent); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: maxSizeAmplificationPercent + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactionOptionsUniversal_maxSizeAmplificationPercent( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->max_size_amplification_percent); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: setCompressionSizePercent + * Signature: (JI)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_setCompressionSizePercent( + JNIEnv*, jobject, jlong jhandle, jint jcompression_size_percent) { + auto* opt = + reinterpret_cast(jhandle); + opt->compression_size_percent = + static_cast(jcompression_size_percent); 
+} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: compressionSizePercent + * Signature: (J)I + */ +jint Java_org_rocksdb_CompactionOptionsUniversal_compressionSizePercent( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->compression_size_percent); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: setStopStyle + * Signature: (JB)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_setStopStyle( + JNIEnv*, jobject, jlong jhandle, jbyte jstop_style_value) { + auto* opt = + reinterpret_cast(jhandle); + opt->stop_style = + ROCKSDB_NAMESPACE::CompactionStopStyleJni::toCppCompactionStopStyle( + jstop_style_value); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: stopStyle + * Signature: (J)B + */ +jbyte Java_org_rocksdb_CompactionOptionsUniversal_stopStyle(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompactionStopStyleJni::toJavaCompactionStopStyle( + opt->stop_style); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: setAllowTrivialMove + * Signature: (JZ)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_setAllowTrivialMove( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_trivial_move) { + auto* opt = + reinterpret_cast(jhandle); + opt->allow_trivial_move = static_cast(jallow_trivial_move); +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: allowTrivialMove + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_CompactionOptionsUniversal_allowTrivialMove( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return opt->allow_trivial_move; +} + +/* + * Class: org_rocksdb_CompactionOptionsUniversal + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_CompactionOptionsUniversal_disposeInternal( + JNIEnv*, jobject, jlong jhandle) { + delete reinterpret_cast( + jhandle); +} diff --git a/src/rocksdb/java/rocksjni/comparator.cc b/src/rocksdb/java/rocksjni/comparator.cc new file mode 100644 index 000000000..11279c4ce --- /dev/null +++ b/src/rocksdb/java/rocksjni/comparator.cc @@ -0,0 +1,60 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::Comparator. 
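For orientation, this is the C++ interface that the Java comparator registered by this file ultimately implements; a trivial reverse-bytewise comparator (illustrative, not from the patch) looks like this:

    #include <string>
    #include "rocksdb/comparator.h"
    #include "rocksdb/slice.h"

    class ReverseBytewiseExample : public rocksdb::Comparator {
     public:
      const char* Name() const override { return "example.ReverseBytewise"; }
      int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override {
        return -a.compare(b);  // invert the default bytewise order
      }
      // no-op key shortening is always correct, just suboptimal
      void FindShortestSeparator(std::string*, const rocksdb::Slice&) const override {}
      void FindShortSuccessor(std::string*) const override {}
    };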
+
+#include <jni.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <functional>
+#include <string>
+
+#include "include/org_rocksdb_AbstractComparator.h"
+#include "include/org_rocksdb_NativeComparatorWrapper.h"
+#include "rocksjni/comparatorjnicallback.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class:     org_rocksdb_AbstractComparator
+ * Method:    createNewComparator
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_AbstractComparator_createNewComparator(
+    JNIEnv* env, jobject jcomparator, jlong copt_handle) {
+  auto* copt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*>(
+          copt_handle);
+  auto* c =
+      new ROCKSDB_NAMESPACE::ComparatorJniCallback(env, jcomparator, copt);
+  return GET_CPLUSPLUS_POINTER(c);
+}
+
+/*
+ * Class:     org_rocksdb_AbstractComparator
+ * Method:    usingDirectBuffers
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_AbstractComparator_usingDirectBuffers(JNIEnv*,
+                                                                jobject,
+                                                                jlong jhandle) {
+  auto* c =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallback*>(jhandle);
+  return static_cast<jboolean>(c->m_options->direct_buffer);
+}
+
+/*
+ * Class:     org_rocksdb_NativeComparatorWrapper
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_NativeComparatorWrapper_disposeInternal(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jcomparator_handle) {
+  auto* comparator =
+      reinterpret_cast<ROCKSDB_NAMESPACE::Comparator*>(jcomparator_handle);
+  delete comparator;
+}
diff --git a/src/rocksdb/java/rocksjni/comparatorjnicallback.cc b/src/rocksdb/java/rocksjni/comparatorjnicallback.cc
new file mode 100644
index 000000000..07ab9fa41
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/comparatorjnicallback.cc
@@ -0,0 +1,646 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Comparator.
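A recurring decision in the callbacks implemented below is whether a slice fits into one of the pre-allocated reusable ByteBuffers or needs a one-off allocation. Reduced to a predicate (a sketch mirroring the reuse_jbuf_* checks in Compare/FindShortestSeparator):

    #include <cstddef>
    #include <cstdint>

    bool fits_reusable_buffer(size_t slice_size, int32_t max_reused_buffer_size) {
      // a slice is copied into a reused buffer only when it fits
      return static_cast<int64_t>(slice_size) <=
             static_cast<int64_t>(max_reused_buffer_size);
    }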
+ +#include "rocksjni/comparatorjnicallback.h" + +#include "rocksjni/portal.h" + +namespace ROCKSDB_NAMESPACE { +ComparatorJniCallback::ComparatorJniCallback( + JNIEnv* env, jobject jcomparator, + const ComparatorJniCallbackOptions* options) + : JniCallback(env, jcomparator), m_options(options) { + // cache the AbstractComparatorJniBridge class as we will reuse it many times + // for each callback + m_abstract_comparator_jni_bridge_clazz = static_cast( + env->NewGlobalRef(AbstractComparatorJniBridge::getJClass(env))); + + // Note: The name of a Comparator will not change during it's lifetime, + // so we cache it in a global var + jmethodID jname_mid = AbstractComparatorJni::getNameMethodId(env); + if (jname_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + jstring js_name = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid); + if (env->ExceptionCheck()) { + // exception thrown + return; + } + jboolean has_exception = JNI_FALSE; + m_name = JniUtil::copyString(env, js_name, + &has_exception); // also releases jsName + if (has_exception == JNI_TRUE) { + // exception thrown + return; + } + + // cache the ByteBuffer class as we will reuse it many times for each callback + m_jbytebuffer_clazz = + static_cast(env->NewGlobalRef(ByteBufferJni::getJClass(env))); + + m_jcompare_mid = AbstractComparatorJniBridge::getCompareInternalMethodId( + env, m_abstract_comparator_jni_bridge_clazz); + if (m_jcompare_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + m_jshortest_mid = + AbstractComparatorJniBridge::getFindShortestSeparatorInternalMethodId( + env, m_abstract_comparator_jni_bridge_clazz); + if (m_jshortest_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + m_jshort_mid = + AbstractComparatorJniBridge::getFindShortSuccessorInternalMethodId( + env, m_abstract_comparator_jni_bridge_clazz); + if (m_jshort_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + // do we need reusable buffers? 
+ if (m_options->max_reused_buffer_size > -1) { + if (m_options->reused_synchronisation_type == + ReusedSynchronisationType::THREAD_LOCAL) { + // buffers reused per thread + UnrefHandler unref = [](void* ptr) { + ThreadLocalBuf* tlb = reinterpret_cast(ptr); + jboolean attached_thread = JNI_FALSE; + JNIEnv* _env = JniUtil::getJniEnv(tlb->jvm, &attached_thread); + if (_env != nullptr) { + if (tlb->direct_buffer) { + void* buf = _env->GetDirectBufferAddress(tlb->jbuf); + delete[] static_cast(buf); + } + _env->DeleteGlobalRef(tlb->jbuf); + JniUtil::releaseJniEnv(tlb->jvm, attached_thread); + } + }; + + m_tl_buf_a = new ThreadLocalPtr(unref); + m_tl_buf_b = new ThreadLocalPtr(unref); + + m_jcompare_buf_a = nullptr; + m_jcompare_buf_b = nullptr; + m_jshortest_buf_start = nullptr; + m_jshortest_buf_limit = nullptr; + m_jshort_buf_key = nullptr; + + } else { + // buffers reused and shared across threads + const bool adaptive = m_options->reused_synchronisation_type == + ReusedSynchronisationType::ADAPTIVE_MUTEX; + mtx_compare = std::unique_ptr(new port::Mutex(adaptive)); + mtx_shortest = std::unique_ptr(new port::Mutex(adaptive)); + mtx_short = std::unique_ptr(new port::Mutex(adaptive)); + + m_jcompare_buf_a = env->NewGlobalRef(ByteBufferJni::construct( + env, m_options->direct_buffer, m_options->max_reused_buffer_size, + m_jbytebuffer_clazz)); + if (m_jcompare_buf_a == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + m_jcompare_buf_b = env->NewGlobalRef(ByteBufferJni::construct( + env, m_options->direct_buffer, m_options->max_reused_buffer_size, + m_jbytebuffer_clazz)); + if (m_jcompare_buf_b == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + m_jshortest_buf_start = env->NewGlobalRef(ByteBufferJni::construct( + env, m_options->direct_buffer, m_options->max_reused_buffer_size, + m_jbytebuffer_clazz)); + if (m_jshortest_buf_start == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + m_jshortest_buf_limit = env->NewGlobalRef(ByteBufferJni::construct( + env, m_options->direct_buffer, m_options->max_reused_buffer_size, + m_jbytebuffer_clazz)); + if (m_jshortest_buf_limit == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + m_jshort_buf_key = env->NewGlobalRef(ByteBufferJni::construct( + env, m_options->direct_buffer, m_options->max_reused_buffer_size, + m_jbytebuffer_clazz)); + if (m_jshort_buf_key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + m_tl_buf_a = nullptr; + m_tl_buf_b = nullptr; + } + + } else { + m_jcompare_buf_a = nullptr; + m_jcompare_buf_b = nullptr; + m_jshortest_buf_start = nullptr; + m_jshortest_buf_limit = nullptr; + m_jshort_buf_key = nullptr; + + m_tl_buf_a = nullptr; + m_tl_buf_b = nullptr; + } +} + +ComparatorJniCallback::~ComparatorJniCallback() { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + env->DeleteGlobalRef(m_abstract_comparator_jni_bridge_clazz); + + env->DeleteGlobalRef(m_jbytebuffer_clazz); + + if (m_jcompare_buf_a != nullptr) { + if (m_options->direct_buffer) { + void* buf = env->GetDirectBufferAddress(m_jcompare_buf_a); + delete[] static_cast(buf); + } + env->DeleteGlobalRef(m_jcompare_buf_a); + } + + if (m_jcompare_buf_b != nullptr) { + if (m_options->direct_buffer) { + void* buf = env->GetDirectBufferAddress(m_jcompare_buf_b); + delete[] static_cast(buf); + } + env->DeleteGlobalRef(m_jcompare_buf_b); + } + + if (m_jshortest_buf_start != nullptr) { + if (m_options->direct_buffer) { + void* buf = 
env->GetDirectBufferAddress(m_jshortest_buf_start); + delete[] static_cast(buf); + } + env->DeleteGlobalRef(m_jshortest_buf_start); + } + + if (m_jshortest_buf_limit != nullptr) { + if (m_options->direct_buffer) { + void* buf = env->GetDirectBufferAddress(m_jshortest_buf_limit); + delete[] static_cast(buf); + } + env->DeleteGlobalRef(m_jshortest_buf_limit); + } + + if (m_jshort_buf_key != nullptr) { + if (m_options->direct_buffer) { + void* buf = env->GetDirectBufferAddress(m_jshort_buf_key); + delete[] static_cast(buf); + } + env->DeleteGlobalRef(m_jshort_buf_key); + } + + if (m_tl_buf_a != nullptr) { + delete m_tl_buf_a; + } + + if (m_tl_buf_b != nullptr) { + delete m_tl_buf_b; + } + + releaseJniEnv(attached_thread); +} + +const char* ComparatorJniCallback::Name() const { return m_name.get(); } + +int ComparatorJniCallback::Compare(const Slice& a, const Slice& b) const { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + const bool reuse_jbuf_a = + static_cast(a.size()) <= m_options->max_reused_buffer_size; + const bool reuse_jbuf_b = + static_cast(b.size()) <= m_options->max_reused_buffer_size; + + MaybeLockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b); + + jobject jcompare_buf_a = + GetBuffer(env, a, reuse_jbuf_a, m_tl_buf_a, m_jcompare_buf_a); + if (jcompare_buf_a == nullptr) { + // exception occurred + MaybeUnlockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return 0; + } + + jobject jcompare_buf_b = + GetBuffer(env, b, reuse_jbuf_b, m_tl_buf_b, m_jcompare_buf_b); + if (jcompare_buf_b == nullptr) { + // exception occurred + if (!reuse_jbuf_a) { + DeleteBuffer(env, jcompare_buf_a); + } + MaybeUnlockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return 0; + } + + jint result = env->CallStaticIntMethod( + m_abstract_comparator_jni_bridge_clazz, m_jcompare_mid, m_jcallback_obj, + jcompare_buf_a, reuse_jbuf_a ? a.size() : -1, jcompare_buf_b, + reuse_jbuf_b ? 
b.size() : -1); + + if (env->ExceptionCheck()) { + // exception thrown from CallIntMethod + env->ExceptionDescribe(); // print out exception to stderr + result = 0; // we could not get a result from java callback so use 0 + } + + if (!reuse_jbuf_a) { + DeleteBuffer(env, jcompare_buf_a); + } + if (!reuse_jbuf_b) { + DeleteBuffer(env, jcompare_buf_b); + } + + MaybeUnlockForReuse(mtx_compare, reuse_jbuf_a || reuse_jbuf_b); + + releaseJniEnv(attached_thread); + + return result; +} + +void ComparatorJniCallback::FindShortestSeparator(std::string* start, + const Slice& limit) const { + if (start == nullptr) { + return; + } + + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + const bool reuse_jbuf_start = static_cast(start->length()) <= + m_options->max_reused_buffer_size; + const bool reuse_jbuf_limit = + static_cast(limit.size()) <= m_options->max_reused_buffer_size; + + MaybeLockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit); + + Slice sstart(start->data(), start->length()); + jobject j_start_buf = GetBuffer(env, sstart, reuse_jbuf_start, m_tl_buf_a, + m_jshortest_buf_start); + if (j_start_buf == nullptr) { + // exception occurred + MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + + jobject j_limit_buf = GetBuffer(env, limit, reuse_jbuf_limit, m_tl_buf_b, + m_jshortest_buf_limit); + if (j_limit_buf == nullptr) { + // exception occurred + if (!reuse_jbuf_start) { + DeleteBuffer(env, j_start_buf); + } + MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + + jint jstart_len = env->CallStaticIntMethod( + m_abstract_comparator_jni_bridge_clazz, m_jshortest_mid, m_jcallback_obj, + j_start_buf, reuse_jbuf_start ? start->length() : -1, j_limit_buf, + reuse_jbuf_limit ? limit.size() : -1); + + if (env->ExceptionCheck()) { + // exception thrown from CallIntMethod + env->ExceptionDescribe(); // print out exception to stderr + + } else if (static_cast(jstart_len) != start->length()) { + // start buffer has changed in Java, so update `start` with the result + bool copy_from_non_direct = false; + if (reuse_jbuf_start) { + // reused a buffer + if (m_options->direct_buffer) { + // reused direct buffer + void* start_buf = env->GetDirectBufferAddress(j_start_buf); + if (start_buf == nullptr) { + if (!reuse_jbuf_start) { + DeleteBuffer(env, j_start_buf); + } + if (!reuse_jbuf_limit) { + DeleteBuffer(env, j_limit_buf); + } + MaybeUnlockForReuse(mtx_shortest, + reuse_jbuf_start || reuse_jbuf_limit); + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, "Unable to get Direct Buffer Address"); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + start->assign(static_cast(start_buf), jstart_len); + + } else { + // reused non-direct buffer + copy_from_non_direct = true; + } + } else { + // there was a new buffer + if (m_options->direct_buffer) { + // it was direct... 
don't forget to potentially truncate the `start` + // string + start->resize(jstart_len); + } else { + // it was non-direct + copy_from_non_direct = true; + } + } + + if (copy_from_non_direct) { + jbyteArray jarray = + ByteBufferJni::array(env, j_start_buf, m_jbytebuffer_clazz); + if (jarray == nullptr) { + if (!reuse_jbuf_start) { + DeleteBuffer(env, j_start_buf); + } + if (!reuse_jbuf_limit) { + DeleteBuffer(env, j_limit_buf); + } + MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + jboolean has_exception = JNI_FALSE; + JniUtil::byteString( + env, jarray, + [start, jstart_len](const char* data, const size_t) { + return start->assign(data, static_cast(jstart_len)); + }, + &has_exception); + env->DeleteLocalRef(jarray); + if (has_exception == JNI_TRUE) { + if (!reuse_jbuf_start) { + DeleteBuffer(env, j_start_buf); + } + if (!reuse_jbuf_limit) { + DeleteBuffer(env, j_limit_buf); + } + env->ExceptionDescribe(); // print out exception to stderr + MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit); + releaseJniEnv(attached_thread); + return; + } + } + } + + if (!reuse_jbuf_start) { + DeleteBuffer(env, j_start_buf); + } + if (!reuse_jbuf_limit) { + DeleteBuffer(env, j_limit_buf); + } + + MaybeUnlockForReuse(mtx_shortest, reuse_jbuf_start || reuse_jbuf_limit); + + releaseJniEnv(attached_thread); +} + +void ComparatorJniCallback::FindShortSuccessor(std::string* key) const { + if (key == nullptr) { + return; + } + + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + const bool reuse_jbuf_key = + static_cast(key->length()) <= m_options->max_reused_buffer_size; + + MaybeLockForReuse(mtx_short, reuse_jbuf_key); + + Slice skey(key->data(), key->length()); + jobject j_key_buf = + GetBuffer(env, skey, reuse_jbuf_key, m_tl_buf_a, m_jshort_buf_key); + if (j_key_buf == nullptr) { + // exception occurred + MaybeUnlockForReuse(mtx_short, reuse_jbuf_key); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + + jint jkey_len = env->CallStaticIntMethod( + m_abstract_comparator_jni_bridge_clazz, m_jshort_mid, m_jcallback_obj, + j_key_buf, reuse_jbuf_key ? key->length() : -1); + + if (env->ExceptionCheck()) { + // exception thrown from CallObjectMethod + if (!reuse_jbuf_key) { + DeleteBuffer(env, j_key_buf); + } + MaybeUnlockForReuse(mtx_short, reuse_jbuf_key); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + + if (static_cast(jkey_len) != key->length()) { + // key buffer has changed in Java, so update `key` with the result + bool copy_from_non_direct = false; + if (reuse_jbuf_key) { + // reused a buffer + if (m_options->direct_buffer) { + // reused direct buffer + void* key_buf = env->GetDirectBufferAddress(j_key_buf); + if (key_buf == nullptr) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, "Unable to get Direct Buffer Address"); + if (!reuse_jbuf_key) { + DeleteBuffer(env, j_key_buf); + } + MaybeUnlockForReuse(mtx_short, reuse_jbuf_key); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + key->assign(static_cast(key_buf), jkey_len); + } else { + // reused non-direct buffer + copy_from_non_direct = true; + } + } else { + // there was a new buffer + if (m_options->direct_buffer) { + // it was direct... 
don't forget to potentially truncate the `key` + // string + key->resize(jkey_len); + } else { + // it was non-direct + copy_from_non_direct = true; + } + } + + if (copy_from_non_direct) { + jbyteArray jarray = + ByteBufferJni::array(env, j_key_buf, m_jbytebuffer_clazz); + if (jarray == nullptr) { + if (!reuse_jbuf_key) { + DeleteBuffer(env, j_key_buf); + } + MaybeUnlockForReuse(mtx_short, reuse_jbuf_key); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + jboolean has_exception = JNI_FALSE; + JniUtil::byteString( + env, jarray, + [key, jkey_len](const char* data, const size_t) { + return key->assign(data, static_cast(jkey_len)); + }, + &has_exception); + env->DeleteLocalRef(jarray); + if (has_exception == JNI_TRUE) { + if (!reuse_jbuf_key) { + DeleteBuffer(env, j_key_buf); + } + MaybeUnlockForReuse(mtx_short, reuse_jbuf_key); + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + } + } + + if (!reuse_jbuf_key) { + DeleteBuffer(env, j_key_buf); + } + + MaybeUnlockForReuse(mtx_short, reuse_jbuf_key); + + releaseJniEnv(attached_thread); +} + +inline void ComparatorJniCallback::MaybeLockForReuse( + const std::unique_ptr& mutex, const bool cond) const { + // no need to lock if using thread_local + if (m_options->reused_synchronisation_type != + ReusedSynchronisationType::THREAD_LOCAL && + cond) { + mutex.get()->Lock(); + } +} + +inline void ComparatorJniCallback::MaybeUnlockForReuse( + const std::unique_ptr& mutex, const bool cond) const { + // no need to unlock if using thread_local + if (m_options->reused_synchronisation_type != + ReusedSynchronisationType::THREAD_LOCAL && + cond) { + mutex.get()->Unlock(); + } +} + +jobject ComparatorJniCallback::GetBuffer(JNIEnv* env, const Slice& src, + bool reuse_buffer, + ThreadLocalPtr* tl_buf, + jobject jreuse_buffer) const { + if (reuse_buffer) { + if (m_options->reused_synchronisation_type == + ReusedSynchronisationType::THREAD_LOCAL) { + // reuse thread-local bufffer + ThreadLocalBuf* tlb = reinterpret_cast(tl_buf->Get()); + if (tlb == nullptr) { + // thread-local buffer has not yet been created, so create it + jobject jtl_buf = env->NewGlobalRef(ByteBufferJni::construct( + env, m_options->direct_buffer, m_options->max_reused_buffer_size, + m_jbytebuffer_clazz)); + if (jtl_buf == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + tlb = new ThreadLocalBuf(m_jvm, m_options->direct_buffer, jtl_buf); + tl_buf->Reset(tlb); + } + return ReuseBuffer(env, src, tlb->jbuf); + } else { + // reuse class member buffer + return ReuseBuffer(env, src, jreuse_buffer); + } + } else { + // new buffer + return NewBuffer(env, src); + } +} + +jobject ComparatorJniCallback::ReuseBuffer(JNIEnv* env, const Slice& src, + jobject jreuse_buffer) const { + // we can reuse the buffer + if (m_options->direct_buffer) { + // copy into direct buffer + void* buf = env->GetDirectBufferAddress(jreuse_buffer); + if (buf == nullptr) { + // either memory region is undefined, given object is not a direct + // java.nio.Buffer, or JNI access to direct buffers is not supported by + // this virtual machine. 
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, "Unable to get Direct Buffer Address"); + return nullptr; + } + memcpy(buf, src.data(), src.size()); + } else { + // copy into non-direct buffer + const jbyteArray jarray = + ByteBufferJni::array(env, jreuse_buffer, m_jbytebuffer_clazz); + if (jarray == nullptr) { + // exception occurred + return nullptr; + } + env->SetByteArrayRegion( + jarray, 0, static_cast(src.size()), + const_cast(reinterpret_cast(src.data()))); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(jarray); + return nullptr; + } + env->DeleteLocalRef(jarray); + } + return jreuse_buffer; +} + +jobject ComparatorJniCallback::NewBuffer(JNIEnv* env, const Slice& src) const { + // we need a new buffer + jobject jbuf = + ByteBufferJni::constructWith(env, m_options->direct_buffer, src.data(), + src.size(), m_jbytebuffer_clazz); + if (jbuf == nullptr) { + // exception occurred + return nullptr; + } + return jbuf; +} + +void ComparatorJniCallback::DeleteBuffer(JNIEnv* env, jobject jbuffer) const { + env->DeleteLocalRef(jbuffer); +} + +} // namespace ROCKSDB_NAMESPACE diff --git a/src/rocksdb/java/rocksjni/comparatorjnicallback.h b/src/rocksdb/java/rocksjni/comparatorjnicallback.h new file mode 100644 index 000000000..a983ce4b5 --- /dev/null +++ b/src/rocksdb/java/rocksjni/comparatorjnicallback.h @@ -0,0 +1,141 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::Comparator + +#ifndef JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_ +#define JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_ + +#include + +#include +#include + +#include "port/port.h" +#include "rocksdb/comparator.h" +#include "rocksdb/slice.h" +#include "rocksjni/jnicallback.h" +#include "util/thread_local.h" + +namespace ROCKSDB_NAMESPACE { + +enum ReusedSynchronisationType { + /** + * Standard mutex. + */ + MUTEX, + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switch when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + */ + ADAPTIVE_MUTEX, + + /** + * There is a reused buffer per-thread. + */ + THREAD_LOCAL +}; + +struct ComparatorJniCallbackOptions { + // Set the synchronisation type used to guard the reused buffers. + // Only used if max_reused_buffer_size > 0. + // Default: ADAPTIVE_MUTEX + ReusedSynchronisationType reused_synchronisation_type = + ReusedSynchronisationType::ADAPTIVE_MUTEX; + + // Indicates if a direct byte buffer (i.e. outside of the normal + // garbage-collected heap) is used for the callbacks to Java, + // as opposed to a non-direct byte buffer which is a wrapper around + // an on-heap byte[]. + // Default: true + bool direct_buffer = true; + + // Maximum size of a buffer (in bytes) that will be reused. + // Comparators will use 5 of these buffers, + // so the retained memory size will be 5 * max_reused_buffer_size. + // When a buffer is needed for transferring data to a callback, + // if it requires less than max_reused_buffer_size, then an + // existing buffer will be reused, else a new buffer will be + // allocated just for that callback. -1 to disable. 
+  // Default: 64 bytes
+  int32_t max_reused_buffer_size = 64;
+};
+
+/**
+ * This class acts as a bridge between C++ and Java. The methods in this
+ * class are called back from the RocksDB storage engine (C++); we then call
+ * back into the appropriate Java method. This enables Comparators to be
+ * implemented in Java.
+ *
+ * This Comparator caches the Java ByteBuffer objects that are passed to the
+ * compare, findShortestSeparator and findShortSuccessor callbacks. Reusing
+ * those buffers instead of constructing new objects for every callback is
+ * considerably faster; the trade-off is that access to each reused buffer
+ * must be guarded independently, via the mutexes mtx_compare, mtx_shortest
+ * and mtx_short respectively.
+ */
+class ComparatorJniCallback : public JniCallback, public Comparator {
+ public:
+  ComparatorJniCallback(JNIEnv* env, jobject jcomparator,
+                        const ComparatorJniCallbackOptions* options);
+  ~ComparatorJniCallback();
+  virtual const char* Name() const;
+  virtual int Compare(const Slice& a, const Slice& b) const;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const;
+  virtual void FindShortSuccessor(std::string* key) const;
+  const ComparatorJniCallbackOptions* m_options;
+
+ private:
+  struct ThreadLocalBuf {
+    ThreadLocalBuf(JavaVM* _jvm, bool _direct_buffer, jobject _jbuf)
+        : jvm(_jvm), direct_buffer(_direct_buffer), jbuf(_jbuf) {}
+    JavaVM* jvm;
+    bool direct_buffer;
+    jobject jbuf;
+  };
+  inline void MaybeLockForReuse(const std::unique_ptr<port::Mutex>& mutex,
+                                const bool cond) const;
+  inline void MaybeUnlockForReuse(const std::unique_ptr<port::Mutex>& mutex,
+                                  const bool cond) const;
+  jobject GetBuffer(JNIEnv* env, const Slice& src, bool reuse_buffer,
+                    ThreadLocalPtr* tl_buf, jobject jreuse_buffer) const;
+  jobject ReuseBuffer(JNIEnv* env, const Slice& src,
+                      jobject jreuse_buffer) const;
+  jobject NewBuffer(JNIEnv* env, const Slice& src) const;
+  void DeleteBuffer(JNIEnv* env, jobject jbuffer) const;
+  // used for synchronisation in the compare method
+  std::unique_ptr<port::Mutex> mtx_compare;
+  // used for synchronisation in the findShortestSeparator method
+  std::unique_ptr<port::Mutex> mtx_shortest;
+  // used for synchronisation in the findShortSuccessor method
+  std::unique_ptr<port::Mutex> mtx_short;
+  std::unique_ptr<const char[]> m_name;
+  jclass m_abstract_comparator_jni_bridge_clazz;  // TODO(AR) could we make
+                                                  // this static somehow?
+  jclass m_jbytebuffer_clazz;  // TODO(AR) we could cache this globally for the
+                               // entire VM if we switch more APIs to use
+                               // ByteBuffer
+                               // TODO(AR) could we make this static somehow?
+  jmethodID m_jcompare_mid;   // TODO(AR) could we make this static somehow?
+  jmethodID m_jshortest_mid;  // TODO(AR) could we make this static somehow?
+  jmethodID m_jshort_mid;     // TODO(AR) could we make this static somehow?
+  jobject m_jcompare_buf_a;
+  jobject m_jcompare_buf_b;
+  jobject m_jshortest_buf_start;
+  jobject m_jshortest_buf_limit;
+  jobject m_jshort_buf_key;
+  ThreadLocalPtr* m_tl_buf_a;
+  ThreadLocalPtr* m_tl_buf_b;
+};
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif  // JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/compression_options.cc b/src/rocksdb/java/rocksjni/compression_options.cc
new file mode 100644
index 000000000..53f240560
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/compression_options.cc
@@ -0,0 +1,214 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::CompressionOptions. + +#include + +#include "include/org_rocksdb_CompressionOptions.h" +#include "rocksdb/advanced_options.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_CompressionOptions + * Method: newCompressionOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_CompressionOptions_newCompressionOptions(JNIEnv*, + jclass) { + const auto* opt = new ROCKSDB_NAMESPACE::CompressionOptions(); + return GET_CPLUSPLUS_POINTER(opt); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: setWindowBits + * Signature: (JI)V + */ +void Java_org_rocksdb_CompressionOptions_setWindowBits(JNIEnv*, jobject, + jlong jhandle, + jint jwindow_bits) { + auto* opt = reinterpret_cast(jhandle); + opt->window_bits = static_cast(jwindow_bits); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: windowBits + * Signature: (J)I + */ +jint Java_org_rocksdb_CompressionOptions_windowBits(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->window_bits); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: setLevel + * Signature: (JI)V + */ +void Java_org_rocksdb_CompressionOptions_setLevel(JNIEnv*, jobject, + jlong jhandle, jint jlevel) { + auto* opt = reinterpret_cast(jhandle); + opt->level = static_cast(jlevel); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: level + * Signature: (J)I + */ +jint Java_org_rocksdb_CompressionOptions_level(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->level); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: setStrategy + * Signature: (JI)V + */ +void Java_org_rocksdb_CompressionOptions_setStrategy(JNIEnv*, jobject, + jlong jhandle, + jint jstrategy) { + auto* opt = reinterpret_cast(jhandle); + opt->strategy = static_cast(jstrategy); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: strategy + * Signature: (J)I + */ +jint Java_org_rocksdb_CompressionOptions_strategy(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->strategy); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: setMaxDictBytes + * Signature: (JI)V + */ +void Java_org_rocksdb_CompressionOptions_setMaxDictBytes(JNIEnv*, jobject, + jlong jhandle, + jint jmax_dict_bytes) { + auto* opt = reinterpret_cast(jhandle); + opt->max_dict_bytes = static_cast(jmax_dict_bytes); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: maxDictBytes + * Signature: (J)I + */ +jint Java_org_rocksdb_CompressionOptions_maxDictBytes(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_dict_bytes); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: setZstdMaxTrainBytes + * Signature: (JI)V + */ +void Java_org_rocksdb_CompressionOptions_setZstdMaxTrainBytes( + JNIEnv*, jobject, jlong jhandle, jint jzstd_max_train_bytes) { + auto* opt = reinterpret_cast(jhandle); + opt->zstd_max_train_bytes = static_cast(jzstd_max_train_bytes); +} + +/* + * Class: org_rocksdb_CompressionOptions + * Method: zstdMaxTrainBytes + * Signature: (J)I + */ +jint Java_org_rocksdb_CompressionOptions_zstdMaxTrainBytes(JNIEnv*, jobject, + jlong 
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  return static_cast<jint>(opt->zstd_max_train_bytes);
+}
+
+/*
+ * Class:     org_rocksdb_CompressionOptions
+ * Method:    setMaxDictBufferBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_CompressionOptions_setMaxDictBufferBytes(
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_dict_buffer_bytes) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  opt->max_dict_buffer_bytes = static_cast<uint64_t>(jmax_dict_buffer_bytes);
+}
+
+/*
+ * Class:     org_rocksdb_CompressionOptions
+ * Method:    maxDictBufferBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_CompressionOptions_maxDictBufferBytes(JNIEnv*, jobject,
+                                                             jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  return static_cast<jlong>(opt->max_dict_buffer_bytes);
+}
+
+/*
+ * Class:     org_rocksdb_CompressionOptions
+ * Method:    setUseZstdDictTrainer
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompressionOptions_setUseZstdDictTrainer(
+    JNIEnv*, jobject, jlong jhandle, jboolean juse_zstd_dict_trainer) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  opt->use_zstd_dict_trainer = juse_zstd_dict_trainer == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_CompressionOptions
+ * Method:    useZstdDictTrainer
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompressionOptions_useZstdDictTrainer(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  return static_cast<jboolean>(opt->use_zstd_dict_trainer);
+}
+
+/*
+ * Class:     org_rocksdb_CompressionOptions
+ * Method:    setEnabled
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_CompressionOptions_setEnabled(JNIEnv*, jobject,
+                                                    jlong jhandle,
+                                                    jboolean jenabled) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  opt->enabled = jenabled == JNI_TRUE;
+}
+
+/*
+ * Class:     org_rocksdb_CompressionOptions
+ * Method:    enabled
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_CompressionOptions_enabled(JNIEnv*, jobject,
+                                                     jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+  return static_cast<jboolean>(opt->enabled);
+}
+
+/*
+ * Class:     org_rocksdb_CompressionOptions
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_CompressionOptions_disposeInternal(JNIEnv*, jobject,
+                                                         jlong jhandle) {
+  delete reinterpret_cast<ROCKSDB_NAMESPACE::CompressionOptions*>(jhandle);
+}
diff --git a/src/rocksdb/java/rocksjni/concurrent_task_limiter.cc b/src/rocksdb/java/rocksjni/concurrent_task_limiter.cc
new file mode 100644
index 000000000..0b0b2d271
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/concurrent_task_limiter.cc
@@ -0,0 +1,97 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+//
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
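// [Editor's sketch] A minimal native-side illustration of the
// CompressionOptions fields driven by the JNI setters above. The concrete
// values are example assumptions, not recommendations; field names come from
// rocksdb/advanced_options.h.
//
//   #include "rocksdb/advanced_options.h"
//   #include "rocksdb/options.h"
//
//   rocksdb::Options MakeZstdDictionaryOptions() {
//     rocksdb::Options options;
//     rocksdb::CompressionOptions& c = options.compression_opts;
//     c.window_bits = -14;          // zlib window_bits; negative = raw deflate
//     c.level = 32767;              // sentinel meaning "use the codec default"
//     c.max_dict_bytes = 16 << 10;  // 16 KiB dictionary for dict compression
//     c.zstd_max_train_bytes = 100 * (16 << 10);  // ZSTD training budget
//     c.use_zstd_dict_trainer = true;      // train via zstd's dict trainer
//     c.max_dict_buffer_bytes = 64 << 20;  // cap input buffered for training
//     return options;
//   }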
+ +#include "rocksdb/concurrent_task_limiter.h" + +#include + +#include +#include + +#include "include/org_rocksdb_ConcurrentTaskLimiterImpl.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_ConcurrentTaskLimiterImpl + * Method: newConcurrentTaskLimiterImpl0 + * Signature: (Ljava/lang/String;I)J + */ +jlong Java_org_rocksdb_ConcurrentTaskLimiterImpl_newConcurrentTaskLimiterImpl0( + JNIEnv* env, jclass, jstring jname, jint limit) { + jboolean has_exception = JNI_FALSE; + std::string name = + ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jname, &has_exception); + if (JNI_TRUE == has_exception) { + return 0; + } + + auto* ptr = new std::shared_ptr( + ROCKSDB_NAMESPACE::NewConcurrentTaskLimiter(name, limit)); + + return GET_CPLUSPLUS_POINTER(ptr); +} + +/* + * Class: org_rocksdb_ConcurrentTaskLimiterImpl + * Method: name + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_ConcurrentTaskLimiterImpl_name(JNIEnv* env, jclass, + jlong handle) { + const auto& limiter = *reinterpret_cast< + std::shared_ptr*>(handle); + return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &limiter->GetName()); +} + +/* + * Class: org_rocksdb_ConcurrentTaskLimiterImpl + * Method: setMaxOutstandingTask + * Signature: (JI)V + */ +void Java_org_rocksdb_ConcurrentTaskLimiterImpl_setMaxOutstandingTask( + JNIEnv*, jclass, jlong handle, jint max_outstanding_task) { + const auto& limiter = *reinterpret_cast< + std::shared_ptr*>(handle); + limiter->SetMaxOutstandingTask(static_cast(max_outstanding_task)); +} + +/* + * Class: org_rocksdb_ConcurrentTaskLimiterImpl + * Method: resetMaxOutstandingTask + * Signature: (J)V + */ +void Java_org_rocksdb_ConcurrentTaskLimiterImpl_resetMaxOutstandingTask( + JNIEnv*, jclass, jlong handle) { + const auto& limiter = *reinterpret_cast< + std::shared_ptr*>(handle); + limiter->ResetMaxOutstandingTask(); +} + +/* + * Class: org_rocksdb_ConcurrentTaskLimiterImpl + * Method: outstandingTask + * Signature: (J)I + */ +jint Java_org_rocksdb_ConcurrentTaskLimiterImpl_outstandingTask(JNIEnv*, jclass, + jlong handle) { + const auto& limiter = *reinterpret_cast< + std::shared_ptr*>(handle); + return static_cast(limiter->GetOutstandingTask()); +} + +/* + * Class: org_rocksdb_ConcurrentTaskLimiterImpl + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ConcurrentTaskLimiterImpl_disposeInternal(JNIEnv*, + jobject, + jlong jhandle) { + auto* ptr = reinterpret_cast< + std::shared_ptr*>(jhandle); + delete ptr; // delete std::shared_ptr +} diff --git a/src/rocksdb/java/rocksjni/config_options.cc b/src/rocksdb/java/rocksjni/config_options.cc new file mode 100644 index 000000000..e62111323 --- /dev/null +++ b/src/rocksdb/java/rocksjni/config_options.cc @@ -0,0 +1,90 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling C++ ROCKSDB_NAMESPACE::ConfigOptions methods +// from Java side. 
+ +#include + +#include "include/org_rocksdb_ConfigOptions.h" +#include "rocksdb/convenience.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_ConfigOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ConfigOptions_disposeInternal(JNIEnv *, jobject, + jlong jhandle) { + auto *co = reinterpret_cast(jhandle); + assert(co != nullptr); + delete co; +} + +/* + * Class: org_rocksdb_ConfigOptions + * Method: newConfigOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_ConfigOptions_newConfigOptions(JNIEnv *, jclass) { + auto *cfg_opt = new ROCKSDB_NAMESPACE::ConfigOptions(); + return GET_CPLUSPLUS_POINTER(cfg_opt); +} + +/* + * Class: org_rocksdb_ConfigOptions + * Method: setDelimiter + * Signature: (JLjava/lang/String;)V + */ +void Java_org_rocksdb_ConfigOptions_setDelimiter(JNIEnv *env, jclass, + jlong handle, jstring s) { + auto *cfg_opt = reinterpret_cast(handle); + const char *delim = env->GetStringUTFChars(s, nullptr); + if (delim == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + cfg_opt->delimiter = delim; + env->ReleaseStringUTFChars(s, delim); +} + +/* + * Class: org_rocksdb_ConfigOptions + * Method: setIgnoreUnknownOptions + * Signature: (JZ)V + */ +void Java_org_rocksdb_ConfigOptions_setIgnoreUnknownOptions(JNIEnv *, jclass, + jlong handle, + jboolean b) { + auto *cfg_opt = reinterpret_cast(handle); + cfg_opt->ignore_unknown_options = static_cast(b); +} + +/* + * Class: org_rocksdb_ConfigOptions + * Method: setInputStringsEscaped + * Signature: (JZ)V + */ +void Java_org_rocksdb_ConfigOptions_setInputStringsEscaped(JNIEnv *, jclass, + jlong handle, + jboolean b) { + auto *cfg_opt = reinterpret_cast(handle); + cfg_opt->input_strings_escaped = static_cast(b); +} + +/* + * Class: org_rocksdb_ConfigOptions + * Method: setSanityLevel + * Signature: (JI)V + */ +void Java_org_rocksdb_ConfigOptions_setSanityLevel(JNIEnv *, jclass, + jlong handle, jbyte level) { + auto *cfg_opt = reinterpret_cast(handle); + cfg_opt->sanity_level = + ROCKSDB_NAMESPACE::SanityLevelJni::toCppSanityLevel(level); +} diff --git a/src/rocksdb/java/rocksjni/cplusplus_to_java_convert.h b/src/rocksdb/java/rocksjni/cplusplus_to_java_convert.h new file mode 100644 index 000000000..0eea6fa2c --- /dev/null +++ b/src/rocksdb/java/rocksjni/cplusplus_to_java_convert.h @@ -0,0 +1,37 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#pragma once + +/* + * This macro is used for 32 bit OS. In 32 bit OS, the result number is a + negative number if we use reinterpret_cast(pointer). + * For example, jlong ptr = reinterpret_cast(pointer), ptr is a negative + number in 32 bit OS. + * If we check ptr using ptr > 0, it fails. For example, the following code is + not correct. + * if (jblock_cache_handle > 0) { + std::shared_ptr *pCache = + reinterpret_cast *>( + jblock_cache_handle); + options.block_cache = *pCache; + } + * But the result number is positive number if we do + reinterpret_cast(pointer) first and then cast it to jlong. size_t is 4 + bytes long in 32 bit OS and 8 bytes long in 64 bit OS. + static_cast(reinterpret_cast(_pointer)) is also working in 64 + bit OS. + * + * We don't need an opposite cast because it works from jlong to c++ pointer in + both 32 bit and 64 bit OS. 
+ * For example, the following code works in both 32-bit and 64-bit OSes
+ * (jblock_cache_handle is a jlong):
+ *   std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *pCache =
+ *       reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *>(
+ *           jblock_cache_handle);
+ */
+
+#define GET_CPLUSPLUS_POINTER(_pointer) \
+  static_cast<jlong>(reinterpret_cast<size_t>(_pointer))
diff --git a/src/rocksdb/java/rocksjni/env.cc b/src/rocksdb/java/rocksjni/env.cc
new file mode 100644
index 000000000..bb739fe2b
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/env.cc
@@ -0,0 +1,205 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::Env methods from the Java side.
+
+#include "rocksdb/env.h"
+
+#include <jni.h>
+
+#include <vector>
+
+#include "include/org_rocksdb_Env.h"
+#include "include/org_rocksdb_RocksEnv.h"
+#include "include/org_rocksdb_RocksMemEnv.h"
+#include "include/org_rocksdb_TimedEnv.h"
+#include "portal.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+
+/*
+ * Class:     org_rocksdb_Env
+ * Method:    getDefaultEnvInternal
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_Env_getDefaultEnvInternal(JNIEnv*, jclass) {
+  return GET_CPLUSPLUS_POINTER(ROCKSDB_NAMESPACE::Env::Default());
+}
+
+/*
+ * Class:     org_rocksdb_RocksEnv
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksEnv_disposeInternal(JNIEnv*, jobject,
+                                               jlong jhandle) {
+  auto* e = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+  assert(e != nullptr);
+  delete e;
+}
+
+/*
+ * Class:     org_rocksdb_Env
+ * Method:    setBackgroundThreads
+ * Signature: (JIB)V
+ */
+void Java_org_rocksdb_Env_setBackgroundThreads(JNIEnv*, jobject, jlong jhandle,
+                                               jint jnum,
+                                               jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+  rocks_env->SetBackgroundThreads(
+      static_cast<int>(jnum),
+      ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class:     org_rocksdb_Env
+ * Method:    getBackgroundThreads
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getBackgroundThreads(JNIEnv*, jobject, jlong jhandle,
+                                               jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+  const int num = rocks_env->GetBackgroundThreads(
+      ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+  return static_cast<jint>(num);
+}
+
+/*
+ * Class:     org_rocksdb_Env
+ * Method:    getThreadPoolQueueLen
+ * Signature: (JB)I
+ */
+jint Java_org_rocksdb_Env_getThreadPoolQueueLen(JNIEnv*, jobject, jlong jhandle,
+                                                jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+  const int queue_len = rocks_env->GetThreadPoolQueueLen(
+      ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+  return static_cast<jint>(queue_len);
+}
+
+/*
+ * Class:     org_rocksdb_Env
+ * Method:    incBackgroundThreadsIfNeeded
+ * Signature: (JIB)V
+ */
+void Java_org_rocksdb_Env_incBackgroundThreadsIfNeeded(JNIEnv*, jobject,
+                                                       jlong jhandle, jint jnum,
+                                                       jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
+  rocks_env->IncBackgroundThreadsIfNeeded(
+      static_cast<int>(jnum),
+      ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value));
+}
+
+/*
+ * Class:     org_rocksdb_Env
+ * Method:    lowerThreadPoolIOPriority
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Env_lowerThreadPoolIOPriority(JNIEnv*, jobject,
+                                                    jlong jhandle,
+                                                    jbyte jpriority_value) {
+  auto* rocks_env = reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jhandle);
rocks_env->LowerThreadPoolIOPriority( + ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value)); +} + +/* + * Class: org_rocksdb_Env + * Method: lowerThreadPoolCPUPriority + * Signature: (JB)V + */ +void Java_org_rocksdb_Env_lowerThreadPoolCPUPriority(JNIEnv*, jobject, + jlong jhandle, + jbyte jpriority_value) { + auto* rocks_env = reinterpret_cast(jhandle); + rocks_env->LowerThreadPoolCPUPriority( + ROCKSDB_NAMESPACE::PriorityJni::toCppPriority(jpriority_value)); +} + +/* + * Class: org_rocksdb_Env + * Method: getThreadList + * Signature: (J)[Lorg/rocksdb/ThreadStatus; + */ +jobjectArray Java_org_rocksdb_Env_getThreadList(JNIEnv* env, jobject, + jlong jhandle) { + auto* rocks_env = reinterpret_cast(jhandle); + std::vector thread_status; + ROCKSDB_NAMESPACE::Status s = rocks_env->GetThreadList(&thread_status); + if (!s.ok()) { + // error, throw exception + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + // object[] + const jsize len = static_cast(thread_status.size()); + jobjectArray jthread_status = env->NewObjectArray( + len, ROCKSDB_NAMESPACE::ThreadStatusJni::getJClass(env), nullptr); + if (jthread_status == nullptr) { + // an exception occurred + return nullptr; + } + for (jsize i = 0; i < len; ++i) { + jobject jts = + ROCKSDB_NAMESPACE::ThreadStatusJni::construct(env, &(thread_status[i])); + env->SetObjectArrayElement(jthread_status, i, jts); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(jthread_status); + return nullptr; + } + } + + return jthread_status; +} + +/* + * Class: org_rocksdb_RocksMemEnv + * Method: createMemEnv + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksMemEnv_createMemEnv(JNIEnv*, jclass, + jlong jbase_env_handle) { + auto* base_env = reinterpret_cast(jbase_env_handle); + return GET_CPLUSPLUS_POINTER(ROCKSDB_NAMESPACE::NewMemEnv(base_env)); +} + +/* + * Class: org_rocksdb_RocksMemEnv + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksMemEnv_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* e = reinterpret_cast(jhandle); + assert(e != nullptr); + delete e; +} + +/* + * Class: org_rocksdb_TimedEnv + * Method: createTimedEnv + * Signature: (J)J + */ +jlong Java_org_rocksdb_TimedEnv_createTimedEnv(JNIEnv*, jclass, + jlong jbase_env_handle) { + auto* base_env = reinterpret_cast(jbase_env_handle); + return GET_CPLUSPLUS_POINTER(ROCKSDB_NAMESPACE::NewTimedEnv(base_env)); +} + +/* + * Class: org_rocksdb_TimedEnv + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TimedEnv_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* e = reinterpret_cast(jhandle); + assert(e != nullptr); + delete e; +} diff --git a/src/rocksdb/java/rocksjni/env_options.cc b/src/rocksdb/java/rocksjni/env_options.cc new file mode 100644 index 000000000..3237e2775 --- /dev/null +++ b/src/rocksdb/java/rocksjni/env_options.cc @@ -0,0 +1,305 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling C++ ROCKSDB_NAMESPACE::EnvOptions methods +// from Java side. 
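// [Editor's sketch] The Env plumbing bridged above: NewMemEnv wraps a base
// Env, and the wrapper must outlive any DB opened against it (the Java
// RocksMemEnv object enforces this through disposeInternal). The database
// path is an example assumption; with a MemEnv it never touches the real
// filesystem. Error handling is reduced to asserts for brevity.
//
//   #include <cassert>
//   #include <memory>
//
//   #include "rocksdb/db.h"
//   #include "rocksdb/env.h"
//
//   void MemEnvExample() {
//     std::unique_ptr<rocksdb::Env> mem_env(
//         rocksdb::NewMemEnv(rocksdb::Env::Default()));
//
//     rocksdb::Options options;
//     options.create_if_missing = true;
//     options.env = mem_env.get();
//
//     rocksdb::DB* db = nullptr;
//     rocksdb::Status s = rocksdb::DB::Open(options, "/in-memory/testdb", &db);
//     assert(s.ok());
//     s = db->Put(rocksdb::WriteOptions(), "key", "value");
//     assert(s.ok());
//     delete db;  // close the DB before mem_env goes out of scope
//   }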
+ +#include + +#include "include/org_rocksdb_EnvOptions.h" +#include "rocksdb/env.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +#define ENV_OPTIONS_SET_BOOL(_jhandle, _opt) \ + reinterpret_cast(_jhandle)->_opt = \ + static_cast(_opt) + +#define ENV_OPTIONS_SET_SIZE_T(_jhandle, _opt) \ + reinterpret_cast(_jhandle)->_opt = \ + static_cast(_opt) + +#define ENV_OPTIONS_SET_UINT64_T(_jhandle, _opt) \ + reinterpret_cast(_jhandle)->_opt = \ + static_cast(_opt) + +#define ENV_OPTIONS_GET(_jhandle, _opt) \ + reinterpret_cast(_jhandle)->_opt + +/* + * Class: org_rocksdb_EnvOptions + * Method: newEnvOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_EnvOptions_newEnvOptions__(JNIEnv *, jclass) { + auto *env_opt = new ROCKSDB_NAMESPACE::EnvOptions(); + return GET_CPLUSPLUS_POINTER(env_opt); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: newEnvOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_EnvOptions_newEnvOptions__J(JNIEnv *, jclass, + jlong jdboptions_handle) { + auto *db_options = + reinterpret_cast(jdboptions_handle); + auto *env_opt = new ROCKSDB_NAMESPACE::EnvOptions(*db_options); + return GET_CPLUSPLUS_POINTER(env_opt); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_EnvOptions_disposeInternal(JNIEnv *, jobject, + jlong jhandle) { + auto *eo = reinterpret_cast(jhandle); + assert(eo != nullptr); + delete eo; +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setUseMmapReads + * Signature: (JZ)V + */ +void Java_org_rocksdb_EnvOptions_setUseMmapReads(JNIEnv *, jobject, + jlong jhandle, + jboolean use_mmap_reads) { + ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_reads); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: useMmapReads + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_EnvOptions_useMmapReads(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, use_mmap_reads); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setUseMmapWrites + * Signature: (JZ)V + */ +void Java_org_rocksdb_EnvOptions_setUseMmapWrites(JNIEnv *, jobject, + jlong jhandle, + jboolean use_mmap_writes) { + ENV_OPTIONS_SET_BOOL(jhandle, use_mmap_writes); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: useMmapWrites + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_EnvOptions_useMmapWrites(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, use_mmap_writes); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setUseDirectReads + * Signature: (JZ)V + */ +void Java_org_rocksdb_EnvOptions_setUseDirectReads(JNIEnv *, jobject, + jlong jhandle, + jboolean use_direct_reads) { + ENV_OPTIONS_SET_BOOL(jhandle, use_direct_reads); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: useDirectReads + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_EnvOptions_useDirectReads(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, use_direct_reads); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setUseDirectWrites + * Signature: (JZ)V + */ +void Java_org_rocksdb_EnvOptions_setUseDirectWrites( + JNIEnv *, jobject, jlong jhandle, jboolean use_direct_writes) { + ENV_OPTIONS_SET_BOOL(jhandle, use_direct_writes); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: useDirectWrites + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_EnvOptions_useDirectWrites(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, use_direct_writes); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setAllowFallocate + * Signature: (JZ)V + */ +void 
Java_org_rocksdb_EnvOptions_setAllowFallocate(JNIEnv *, jobject, + jlong jhandle, + jboolean allow_fallocate) { + ENV_OPTIONS_SET_BOOL(jhandle, allow_fallocate); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: allowFallocate + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_EnvOptions_allowFallocate(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, allow_fallocate); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setSetFdCloexec + * Signature: (JZ)V + */ +void Java_org_rocksdb_EnvOptions_setSetFdCloexec(JNIEnv *, jobject, + jlong jhandle, + jboolean set_fd_cloexec) { + ENV_OPTIONS_SET_BOOL(jhandle, set_fd_cloexec); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setFdCloexec + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_EnvOptions_setFdCloexec(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, set_fd_cloexec); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_EnvOptions_setBytesPerSync(JNIEnv *, jobject, + jlong jhandle, + jlong bytes_per_sync) { + ENV_OPTIONS_SET_UINT64_T(jhandle, bytes_per_sync); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: bytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_EnvOptions_bytesPerSync(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, bytes_per_sync); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setFallocateWithKeepSize + * Signature: (JZ)V + */ +void Java_org_rocksdb_EnvOptions_setFallocateWithKeepSize( + JNIEnv *, jobject, jlong jhandle, jboolean fallocate_with_keep_size) { + ENV_OPTIONS_SET_BOOL(jhandle, fallocate_with_keep_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: fallocateWithKeepSize + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_EnvOptions_fallocateWithKeepSize(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, fallocate_with_keep_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setCompactionReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_EnvOptions_setCompactionReadaheadSize( + JNIEnv *, jobject, jlong jhandle, jlong compaction_readahead_size) { + ENV_OPTIONS_SET_SIZE_T(jhandle, compaction_readahead_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: compactionReadaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_EnvOptions_compactionReadaheadSize(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, compaction_readahead_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setRandomAccessMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_EnvOptions_setRandomAccessMaxBufferSize( + JNIEnv *, jobject, jlong jhandle, jlong random_access_max_buffer_size) { + ENV_OPTIONS_SET_SIZE_T(jhandle, random_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: randomAccessMaxBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_EnvOptions_randomAccessMaxBufferSize(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, random_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setWritableFileMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_EnvOptions_setWritableFileMaxBufferSize( + JNIEnv *, jobject, jlong jhandle, jlong writable_file_max_buffer_size) { + ENV_OPTIONS_SET_SIZE_T(jhandle, writable_file_max_buffer_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: writableFileMaxBufferSize + * Signature: (J)J + */ +jlong 
Java_org_rocksdb_EnvOptions_writableFileMaxBufferSize(JNIEnv *, jobject, + jlong jhandle) { + return ENV_OPTIONS_GET(jhandle, writable_file_max_buffer_size); +} + +/* + * Class: org_rocksdb_EnvOptions + * Method: setRateLimiter + * Signature: (JJ)V + */ +void Java_org_rocksdb_EnvOptions_setRateLimiter(JNIEnv *, jobject, + jlong jhandle, + jlong rl_handle) { + auto *sptr_rate_limiter = + reinterpret_cast *>( + rl_handle); + auto *env_opt = reinterpret_cast(jhandle); + env_opt->rate_limiter = sptr_rate_limiter->get(); +} diff --git a/src/rocksdb/java/rocksjni/event_listener.cc b/src/rocksdb/java/rocksjni/event_listener.cc new file mode 100644 index 000000000..965932c9c --- /dev/null +++ b/src/rocksdb/java/rocksjni/event_listener.cc @@ -0,0 +1,44 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::EventListener. + +#include + +#include + +#include "include/org_rocksdb_AbstractEventListener.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/event_listener_jnicallback.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_AbstractEventListener + * Method: createNewEventListener + * Signature: (J)J + */ +jlong Java_org_rocksdb_AbstractEventListener_createNewEventListener( + JNIEnv* env, jobject jobj, jlong jenabled_event_callback_values) { + auto enabled_event_callbacks = + ROCKSDB_NAMESPACE::EnabledEventCallbackJni::toCppEnabledEventCallbacks( + jenabled_event_callback_values); + auto* sptr_event_listener = + new std::shared_ptr( + new ROCKSDB_NAMESPACE::EventListenerJniCallback( + env, jobj, enabled_event_callbacks)); + return GET_CPLUSPLUS_POINTER(sptr_event_listener); +} + +/* + * Class: org_rocksdb_AbstractEventListener + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_AbstractEventListener_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + delete reinterpret_cast*>( + jhandle); +} diff --git a/src/rocksdb/java/rocksjni/event_listener_jnicallback.cc b/src/rocksdb/java/rocksjni/event_listener_jnicallback.cc new file mode 100644 index 000000000..342d938b4 --- /dev/null +++ b/src/rocksdb/java/rocksjni/event_listener_jnicallback.cc @@ -0,0 +1,502 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::EventListener. 
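// [Editor's sketch] A condensed illustration (not the real class) of the
// enabled-event gating that EventListenerJniCallback applies below: a
// jmethodID is resolved once in the constructor, and only if Java enabled
// that event; each native override then early-returns on a nullptr
// jmethodID, so a disabled event costs a single pointer test. The Java
// method name and JNI signature shown are example assumptions.
//
//   #include <jni.h>
//
//   #include <set>
//
//   enum class EnabledEvent { kOnFlushCompleted };
//
//   class GatedListenerSketch {
//    public:
//     GatedListenerSketch(JNIEnv* env, jclass jclazz,
//                         const std::set<EnabledEvent>& enabled) {
//       m_on_flush_completed_mid =
//           enabled.count(EnabledEvent::kOnFlushCompleted) == 1
//               ? env->GetMethodID(jclazz, "onFlushCompletedProxy", "(JJ)V")
//               : nullptr;
//     }
//
//     void OnFlushCompleted(JNIEnv* env, jobject jlistener, jlong jdb_handle,
//                           jlong jinfo_handle) {
//       if (m_on_flush_completed_mid == nullptr) {
//         return;  // event disabled from Java: no JNI traffic at all
//       }
//       env->CallVoidMethod(jlistener, m_on_flush_completed_mid, jdb_handle,
//                           jinfo_handle);
//     }
//
//    private:
//     jmethodID m_on_flush_completed_mid;
//   };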
+ +#include "rocksjni/event_listener_jnicallback.h" + +#include "rocksjni/portal.h" + +namespace ROCKSDB_NAMESPACE { +EventListenerJniCallback::EventListenerJniCallback( + JNIEnv* env, jobject jevent_listener, + const std::set& enabled_event_callbacks) + : JniCallback(env, jevent_listener), + m_enabled_event_callbacks(enabled_event_callbacks) { + InitCallbackMethodId( + m_on_flush_completed_proxy_mid, EnabledEventCallback::ON_FLUSH_COMPLETED, + env, AbstractEventListenerJni::getOnFlushCompletedProxyMethodId); + + InitCallbackMethodId(m_on_flush_begin_proxy_mid, + EnabledEventCallback::ON_FLUSH_BEGIN, env, + AbstractEventListenerJni::getOnFlushBeginProxyMethodId); + + InitCallbackMethodId(m_on_table_file_deleted_mid, + EnabledEventCallback::ON_TABLE_FILE_DELETED, env, + AbstractEventListenerJni::getOnTableFileDeletedMethodId); + + InitCallbackMethodId( + m_on_compaction_begin_proxy_mid, + EnabledEventCallback::ON_COMPACTION_BEGIN, env, + AbstractEventListenerJni::getOnCompactionBeginProxyMethodId); + + InitCallbackMethodId( + m_on_compaction_completed_proxy_mid, + EnabledEventCallback::ON_COMPACTION_COMPLETED, env, + AbstractEventListenerJni::getOnCompactionCompletedProxyMethodId); + + InitCallbackMethodId(m_on_table_file_created_mid, + EnabledEventCallback::ON_TABLE_FILE_CREATED, env, + AbstractEventListenerJni::getOnTableFileCreatedMethodId); + + InitCallbackMethodId( + m_on_table_file_creation_started_mid, + EnabledEventCallback::ON_TABLE_FILE_CREATION_STARTED, env, + AbstractEventListenerJni::getOnTableFileCreationStartedMethodId); + + InitCallbackMethodId(m_on_mem_table_sealed_mid, + EnabledEventCallback::ON_MEMTABLE_SEALED, env, + AbstractEventListenerJni::getOnMemTableSealedMethodId); + + InitCallbackMethodId( + m_on_column_family_handle_deletion_started_mid, + EnabledEventCallback::ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED, env, + AbstractEventListenerJni::getOnColumnFamilyHandleDeletionStartedMethodId); + + InitCallbackMethodId( + m_on_external_file_ingested_proxy_mid, + EnabledEventCallback::ON_EXTERNAL_FILE_INGESTED, env, + AbstractEventListenerJni::getOnExternalFileIngestedProxyMethodId); + + InitCallbackMethodId( + m_on_background_error_proxy_mid, + EnabledEventCallback::ON_BACKGROUND_ERROR, env, + AbstractEventListenerJni::getOnBackgroundErrorProxyMethodId); + + InitCallbackMethodId( + m_on_stall_conditions_changed_mid, + EnabledEventCallback::ON_STALL_CONDITIONS_CHANGED, env, + AbstractEventListenerJni::getOnStallConditionsChangedMethodId); + + InitCallbackMethodId(m_on_file_read_finish_mid, + EnabledEventCallback::ON_FILE_READ_FINISH, env, + AbstractEventListenerJni::getOnFileReadFinishMethodId); + + InitCallbackMethodId(m_on_file_write_finish_mid, + EnabledEventCallback::ON_FILE_WRITE_FINISH, env, + AbstractEventListenerJni::getOnFileWriteFinishMethodId); + + InitCallbackMethodId(m_on_file_flush_finish_mid, + EnabledEventCallback::ON_FILE_FLUSH_FINISH, env, + AbstractEventListenerJni::getOnFileFlushFinishMethodId); + + InitCallbackMethodId(m_on_file_sync_finish_mid, + EnabledEventCallback::ON_FILE_SYNC_FINISH, env, + AbstractEventListenerJni::getOnFileSyncFinishMethodId); + + InitCallbackMethodId( + m_on_file_range_sync_finish_mid, + EnabledEventCallback::ON_FILE_RANGE_SYNC_FINISH, env, + AbstractEventListenerJni::getOnFileRangeSyncFinishMethodId); + + InitCallbackMethodId( + m_on_file_truncate_finish_mid, + EnabledEventCallback::ON_FILE_TRUNCATE_FINISH, env, + AbstractEventListenerJni::getOnFileTruncateFinishMethodId); + + InitCallbackMethodId(m_on_file_close_finish_mid, + 
EnabledEventCallback::ON_FILE_CLOSE_FINISH, env, + AbstractEventListenerJni::getOnFileCloseFinishMethodId); + + InitCallbackMethodId( + m_should_be_notified_on_file_io, + EnabledEventCallback::SHOULD_BE_NOTIFIED_ON_FILE_IO, env, + AbstractEventListenerJni::getShouldBeNotifiedOnFileIOMethodId); + + InitCallbackMethodId( + m_on_error_recovery_begin_proxy_mid, + EnabledEventCallback::ON_ERROR_RECOVERY_BEGIN, env, + AbstractEventListenerJni::getOnErrorRecoveryBeginProxyMethodId); + + InitCallbackMethodId( + m_on_error_recovery_completed_mid, + EnabledEventCallback::ON_ERROR_RECOVERY_COMPLETED, env, + AbstractEventListenerJni::getOnErrorRecoveryCompletedMethodId); +} + +EventListenerJniCallback::~EventListenerJniCallback() {} + +void EventListenerJniCallback::OnFlushCompleted( + DB* db, const FlushJobInfo& flush_job_info) { + if (m_on_flush_completed_proxy_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jflush_job_info = SetupCallbackInvocation( + env, attached_thread, flush_job_info, + FlushJobInfoJni::fromCppFlushJobInfo); + + if (jflush_job_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_flush_completed_proxy_mid, + reinterpret_cast(db), jflush_job_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jflush_job_info}); +} + +void EventListenerJniCallback::OnFlushBegin( + DB* db, const FlushJobInfo& flush_job_info) { + if (m_on_flush_begin_proxy_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jflush_job_info = SetupCallbackInvocation( + env, attached_thread, flush_job_info, + FlushJobInfoJni::fromCppFlushJobInfo); + + if (jflush_job_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_flush_begin_proxy_mid, + reinterpret_cast(db), jflush_job_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jflush_job_info}); +} + +void EventListenerJniCallback::OnTableFileDeleted( + const TableFileDeletionInfo& info) { + if (m_on_table_file_deleted_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jdeletion_info = SetupCallbackInvocation( + env, attached_thread, info, + TableFileDeletionInfoJni::fromCppTableFileDeletionInfo); + + if (jdeletion_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_table_file_deleted_mid, + jdeletion_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jdeletion_info}); +} + +void EventListenerJniCallback::OnCompactionBegin(DB* db, + const CompactionJobInfo& ci) { + if (m_on_compaction_begin_proxy_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jcompaction_job_info = SetupCallbackInvocation( + env, attached_thread, ci, CompactionJobInfoJni::fromCppCompactionJobInfo); + + if (jcompaction_job_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_compaction_begin_proxy_mid, + reinterpret_cast(db), jcompaction_job_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jcompaction_job_info}); +} + +void EventListenerJniCallback::OnCompactionCompleted( + DB* db, const CompactionJobInfo& ci) { + if (m_on_compaction_completed_proxy_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jcompaction_job_info = SetupCallbackInvocation( + env, attached_thread, ci, CompactionJobInfoJni::fromCppCompactionJobInfo); + + if (jcompaction_job_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_compaction_completed_proxy_mid, + reinterpret_cast(db), jcompaction_job_info); + } + + CleanupCallbackInvocation(env, 
attached_thread, {&jcompaction_job_info}); +} + +void EventListenerJniCallback::OnTableFileCreated( + const TableFileCreationInfo& info) { + if (m_on_table_file_created_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jfile_creation_info = SetupCallbackInvocation( + env, attached_thread, info, + TableFileCreationInfoJni::fromCppTableFileCreationInfo); + + if (jfile_creation_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_table_file_created_mid, + jfile_creation_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jfile_creation_info}); +} + +void EventListenerJniCallback::OnTableFileCreationStarted( + const TableFileCreationBriefInfo& info) { + if (m_on_table_file_creation_started_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jcreation_brief_info = + SetupCallbackInvocation( + env, attached_thread, info, + TableFileCreationBriefInfoJni::fromCppTableFileCreationBriefInfo); + + if (jcreation_brief_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_table_file_creation_started_mid, + jcreation_brief_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jcreation_brief_info}); +} + +void EventListenerJniCallback::OnMemTableSealed(const MemTableInfo& info) { + if (m_on_mem_table_sealed_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jmem_table_info = SetupCallbackInvocation( + env, attached_thread, info, MemTableInfoJni::fromCppMemTableInfo); + + if (jmem_table_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_mem_table_sealed_mid, + jmem_table_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jmem_table_info}); +} + +void EventListenerJniCallback::OnColumnFamilyHandleDeletionStarted( + ColumnFamilyHandle* handle) { + if (m_on_column_family_handle_deletion_started_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jcf_handle = SetupCallbackInvocation( + env, attached_thread, *handle, + ColumnFamilyHandleJni::fromCppColumnFamilyHandle); + + if (jcf_handle != nullptr) { + env->CallVoidMethod(m_jcallback_obj, + m_on_column_family_handle_deletion_started_mid, + jcf_handle); + } + + CleanupCallbackInvocation(env, attached_thread, {&jcf_handle}); +} + +void EventListenerJniCallback::OnExternalFileIngested( + DB* db, const ExternalFileIngestionInfo& info) { + if (m_on_external_file_ingested_proxy_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jingestion_info = SetupCallbackInvocation( + env, attached_thread, info, + ExternalFileIngestionInfoJni::fromCppExternalFileIngestionInfo); + + if (jingestion_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_external_file_ingested_proxy_mid, + reinterpret_cast(db), jingestion_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jingestion_info}); +} + +void EventListenerJniCallback::OnBackgroundError(BackgroundErrorReason reason, + Status* bg_error) { + if (m_on_background_error_proxy_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jstatus = SetupCallbackInvocation( + env, attached_thread, *bg_error, StatusJni::construct); + + if (jstatus != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_background_error_proxy_mid, + static_cast(reason), jstatus); + } + + CleanupCallbackInvocation(env, attached_thread, {&jstatus}); +} + +void EventListenerJniCallback::OnStallConditionsChanged( + const WriteStallInfo& info) { + if 
(m_on_stall_conditions_changed_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jwrite_stall_info = SetupCallbackInvocation( + env, attached_thread, info, WriteStallInfoJni::fromCppWriteStallInfo); + + if (jwrite_stall_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_stall_conditions_changed_mid, + jwrite_stall_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jwrite_stall_info}); +} + +void EventListenerJniCallback::OnFileReadFinish(const FileOperationInfo& info) { + OnFileOperation(m_on_file_read_finish_mid, info); +} + +void EventListenerJniCallback::OnFileWriteFinish( + const FileOperationInfo& info) { + OnFileOperation(m_on_file_write_finish_mid, info); +} + +void EventListenerJniCallback::OnFileFlushFinish( + const FileOperationInfo& info) { + OnFileOperation(m_on_file_flush_finish_mid, info); +} + +void EventListenerJniCallback::OnFileSyncFinish(const FileOperationInfo& info) { + OnFileOperation(m_on_file_sync_finish_mid, info); +} + +void EventListenerJniCallback::OnFileRangeSyncFinish( + const FileOperationInfo& info) { + OnFileOperation(m_on_file_range_sync_finish_mid, info); +} + +void EventListenerJniCallback::OnFileTruncateFinish( + const FileOperationInfo& info) { + OnFileOperation(m_on_file_truncate_finish_mid, info); +} + +void EventListenerJniCallback::OnFileCloseFinish( + const FileOperationInfo& info) { + OnFileOperation(m_on_file_close_finish_mid, info); +} + +bool EventListenerJniCallback::ShouldBeNotifiedOnFileIO() { + if (m_should_be_notified_on_file_io == nullptr) { + return false; + } + + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + jboolean jshould_be_notified = + env->CallBooleanMethod(m_jcallback_obj, m_should_be_notified_on_file_io); + + CleanupCallbackInvocation(env, attached_thread, {}); + + return static_cast(jshould_be_notified); +} + +void EventListenerJniCallback::OnErrorRecoveryBegin( + BackgroundErrorReason reason, Status bg_error, bool* auto_recovery) { + if (m_on_error_recovery_begin_proxy_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jbg_error = SetupCallbackInvocation( + env, attached_thread, bg_error, StatusJni::construct); + + if (jbg_error != nullptr) { + jboolean jauto_recovery = env->CallBooleanMethod( + m_jcallback_obj, m_on_error_recovery_begin_proxy_mid, + static_cast(reason), jbg_error); + *auto_recovery = jauto_recovery == JNI_TRUE; + } + + CleanupCallbackInvocation(env, attached_thread, {&jbg_error}); +} + +void EventListenerJniCallback::OnErrorRecoveryCompleted(Status old_bg_error) { + if (m_on_error_recovery_completed_mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jold_bg_error = SetupCallbackInvocation( + env, attached_thread, old_bg_error, StatusJni::construct); + + if (jold_bg_error != nullptr) { + env->CallVoidMethod(m_jcallback_obj, m_on_error_recovery_completed_mid, + jold_bg_error); + } + + CleanupCallbackInvocation(env, attached_thread, {&jold_bg_error}); +} + +void EventListenerJniCallback::InitCallbackMethodId( + jmethodID& mid, EnabledEventCallback eec, JNIEnv* env, + jmethodID (*get_id)(JNIEnv* env)) { + if (m_enabled_event_callbacks.count(eec) == 1) { + mid = get_id(env); + } else { + mid = nullptr; + } +} + +template +jobject EventListenerJniCallback::SetupCallbackInvocation( + JNIEnv*& env, jboolean& attached_thread, const T& cpp_obj, + jobject (*convert)(JNIEnv* env, const T* cpp_obj)) { + attached_thread = 
JNI_FALSE; + env = getJniEnv(&attached_thread); + assert(env != nullptr); + + return convert(env, &cpp_obj); +} + +void EventListenerJniCallback::CleanupCallbackInvocation( + JNIEnv* env, jboolean attached_thread, + std::initializer_list refs) { + for (auto* ref : refs) { + if (*ref == nullptr) continue; + env->DeleteLocalRef(*ref); + } + + if (env->ExceptionCheck()) { + // exception thrown from CallVoidMethod + env->ExceptionDescribe(); // print out exception to stderr + } + + releaseJniEnv(attached_thread); +} + +void EventListenerJniCallback::OnFileOperation(const jmethodID& mid, + const FileOperationInfo& info) { + if (mid == nullptr) { + return; + } + + JNIEnv* env; + jboolean attached_thread; + jobject jop_info = SetupCallbackInvocation( + env, attached_thread, info, + FileOperationInfoJni::fromCppFileOperationInfo); + + if (jop_info != nullptr) { + env->CallVoidMethod(m_jcallback_obj, mid, jop_info); + } + + CleanupCallbackInvocation(env, attached_thread, {&jop_info}); +} +} // namespace ROCKSDB_NAMESPACE diff --git a/src/rocksdb/java/rocksjni/event_listener_jnicallback.h b/src/rocksdb/java/rocksjni/event_listener_jnicallback.h new file mode 100644 index 000000000..f4a235a23 --- /dev/null +++ b/src/rocksdb/java/rocksjni/event_listener_jnicallback.h @@ -0,0 +1,122 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::EventListener. + +#ifndef JAVA_ROCKSJNI_EVENT_LISTENER_JNICALLBACK_H_ +#define JAVA_ROCKSJNI_EVENT_LISTENER_JNICALLBACK_H_ + +#include + +#include +#include + +#include "rocksdb/listener.h" +#include "rocksjni/jnicallback.h" + +namespace ROCKSDB_NAMESPACE { + +enum EnabledEventCallback { + ON_FLUSH_COMPLETED = 0x0, + ON_FLUSH_BEGIN = 0x1, + ON_TABLE_FILE_DELETED = 0x2, + ON_COMPACTION_BEGIN = 0x3, + ON_COMPACTION_COMPLETED = 0x4, + ON_TABLE_FILE_CREATED = 0x5, + ON_TABLE_FILE_CREATION_STARTED = 0x6, + ON_MEMTABLE_SEALED = 0x7, + ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED = 0x8, + ON_EXTERNAL_FILE_INGESTED = 0x9, + ON_BACKGROUND_ERROR = 0xA, + ON_STALL_CONDITIONS_CHANGED = 0xB, + ON_FILE_READ_FINISH = 0xC, + ON_FILE_WRITE_FINISH = 0xD, + ON_FILE_FLUSH_FINISH = 0xE, + ON_FILE_SYNC_FINISH = 0xF, + ON_FILE_RANGE_SYNC_FINISH = 0x10, + ON_FILE_TRUNCATE_FINISH = 0x11, + ON_FILE_CLOSE_FINISH = 0x12, + SHOULD_BE_NOTIFIED_ON_FILE_IO = 0x13, + ON_ERROR_RECOVERY_BEGIN = 0x14, + ON_ERROR_RECOVERY_COMPLETED = 0x15, + + NUM_ENABLED_EVENT_CALLBACK = 0x16, +}; + +class EventListenerJniCallback : public JniCallback, public EventListener { + public: + EventListenerJniCallback( + JNIEnv* env, jobject jevent_listener, + const std::set& enabled_event_callbacks); + virtual ~EventListenerJniCallback(); + virtual void OnFlushCompleted(DB* db, const FlushJobInfo& flush_job_info); + virtual void OnFlushBegin(DB* db, const FlushJobInfo& flush_job_info); + virtual void OnTableFileDeleted(const TableFileDeletionInfo& info); + virtual void OnCompactionBegin(DB* db, const CompactionJobInfo& ci); + virtual void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci); + virtual void OnTableFileCreated(const TableFileCreationInfo& info); + virtual void OnTableFileCreationStarted( + const TableFileCreationBriefInfo& info); + virtual void OnMemTableSealed(const MemTableInfo& info); + virtual void 
OnColumnFamilyHandleDeletionStarted(ColumnFamilyHandle* handle); + virtual void OnExternalFileIngested(DB* db, + const ExternalFileIngestionInfo& info); + virtual void OnBackgroundError(BackgroundErrorReason reason, + Status* bg_error); + virtual void OnStallConditionsChanged(const WriteStallInfo& info); + virtual void OnFileReadFinish(const FileOperationInfo& info); + virtual void OnFileWriteFinish(const FileOperationInfo& info); + virtual void OnFileFlushFinish(const FileOperationInfo& info); + virtual void OnFileSyncFinish(const FileOperationInfo& info); + virtual void OnFileRangeSyncFinish(const FileOperationInfo& info); + virtual void OnFileTruncateFinish(const FileOperationInfo& info); + virtual void OnFileCloseFinish(const FileOperationInfo& info); + virtual bool ShouldBeNotifiedOnFileIO(); + virtual void OnErrorRecoveryBegin(BackgroundErrorReason reason, + Status bg_error, bool* auto_recovery); + virtual void OnErrorRecoveryCompleted(Status old_bg_error); + + private: + inline void InitCallbackMethodId(jmethodID& mid, EnabledEventCallback eec, + JNIEnv* env, + jmethodID (*get_id)(JNIEnv* env)); + template + inline jobject SetupCallbackInvocation( + JNIEnv*& env, jboolean& attached_thread, const T& cpp_obj, + jobject (*convert)(JNIEnv* env, const T* cpp_obj)); + inline void CleanupCallbackInvocation(JNIEnv* env, jboolean attached_thread, + std::initializer_list refs); + inline void OnFileOperation(const jmethodID& mid, + const FileOperationInfo& info); + + const std::set m_enabled_event_callbacks; + jmethodID m_on_flush_completed_proxy_mid; + jmethodID m_on_flush_begin_proxy_mid; + jmethodID m_on_table_file_deleted_mid; + jmethodID m_on_compaction_begin_proxy_mid; + jmethodID m_on_compaction_completed_proxy_mid; + jmethodID m_on_table_file_created_mid; + jmethodID m_on_table_file_creation_started_mid; + jmethodID m_on_mem_table_sealed_mid; + jmethodID m_on_column_family_handle_deletion_started_mid; + jmethodID m_on_external_file_ingested_proxy_mid; + jmethodID m_on_background_error_proxy_mid; + jmethodID m_on_stall_conditions_changed_mid; + jmethodID m_on_file_read_finish_mid; + jmethodID m_on_file_write_finish_mid; + jmethodID m_on_file_flush_finish_mid; + jmethodID m_on_file_sync_finish_mid; + jmethodID m_on_file_range_sync_finish_mid; + jmethodID m_on_file_truncate_finish_mid; + jmethodID m_on_file_close_finish_mid; + jmethodID m_should_be_notified_on_file_io; + jmethodID m_on_error_recovery_begin_proxy_mid; + jmethodID m_on_error_recovery_completed_mid; +}; + +} // namespace ROCKSDB_NAMESPACE + +#endif // JAVA_ROCKSJNI_EVENT_LISTENER_JNICALLBACK_H_ diff --git a/src/rocksdb/java/rocksjni/filter.cc b/src/rocksdb/java/rocksjni/filter.cc new file mode 100644 index 000000000..ed22016d2 --- /dev/null +++ b/src/rocksdb/java/rocksjni/filter.cc @@ -0,0 +1,46 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::FilterPolicy. 
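// [Editor's sketch] The attach/convert/call/cleanup cycle implemented by
// SetupCallbackInvocation and CleanupCallbackInvocation in the event-listener
// bridge above, written against the raw JNI invocation API; the converter
// function pointer and the int event type are placeholders.
//
//   #include <jni.h>
//
//   void InvokeJavaCallbackSketch(JavaVM* jvm, jobject jcallback,
//                                 jmethodID mid, const int& native_event,
//                                 jobject (*convert)(JNIEnv*, const int*)) {
//     // 1. Get a JNIEnv for this native thread, attaching it if necessary.
//     JNIEnv* env = nullptr;
//     bool attached = false;
//     if (jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) ==
//         JNI_EDETACHED) {
//       jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), nullptr);
//       attached = true;
//     }
//
//     // 2. Convert the C++ event object into a Java local reference.
//     jobject jevent = convert(env, &native_event);
//
//     // 3. Call back into Java only if the conversion succeeded.
//     if (jevent != nullptr) {
//       env->CallVoidMethod(jcallback, mid, jevent);
//     }
//
//     // 4. Cleanup: drop the local ref, surface pending exceptions, detach.
//     if (jevent != nullptr) {
//       env->DeleteLocalRef(jevent);
//     }
//     if (env->ExceptionCheck()) {
//       env->ExceptionDescribe();
//     }
//     if (attached) {
//       jvm->DetachCurrentThread();
//     }
//   }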
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <string>
+
+#include "include/org_rocksdb_BloomFilter.h"
+#include "include/org_rocksdb_Filter.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class:     org_rocksdb_BloomFilter
+ * Method:    createNewBloomFilter
+ * Signature: (D)J
+ */
+jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter(JNIEnv* /*env*/,
+                                                        jclass /*jcls*/,
+                                                        jdouble bits_per_key) {
+  auto* sptr_filter =
+      new std::shared_ptr<const ROCKSDB_NAMESPACE::FilterPolicy>(
+          ROCKSDB_NAMESPACE::NewBloomFilterPolicy(bits_per_key));
+  return GET_CPLUSPLUS_POINTER(sptr_filter);
+}
+
+/*
+ * Class:     org_rocksdb_Filter
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Filter_disposeInternal(JNIEnv* /*env*/, jobject /*jobj*/,
+                                             jlong jhandle) {
+  auto* handle =
+      reinterpret_cast<std::shared_ptr<const ROCKSDB_NAMESPACE::FilterPolicy>*>(
+          jhandle);
+  delete handle;  // delete std::shared_ptr
+}
diff --git a/src/rocksdb/java/rocksjni/ingest_external_file_options.cc b/src/rocksdb/java/rocksjni/ingest_external_file_options.cc
new file mode 100644
index 000000000..052cf3325
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/ingest_external_file_options.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::IngestExternalFileOptions.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_IngestExternalFileOptions.h"
+#include "rocksdb/options.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+
+/*
+ * Class:     org_rocksdb_IngestExternalFileOptions
+ * Method:    newIngestExternalFileOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__(
+    JNIEnv*, jclass) {
+  auto* options = new ROCKSDB_NAMESPACE::IngestExternalFileOptions();
+  return GET_CPLUSPLUS_POINTER(options);
+}
+
+/*
+ * Class:     org_rocksdb_IngestExternalFileOptions
+ * Method:    newIngestExternalFileOptions
+ * Signature: (ZZZZ)J
+ */
+jlong Java_org_rocksdb_IngestExternalFileOptions_newIngestExternalFileOptions__ZZZZ(
+    JNIEnv*, jclass, jboolean jmove_files, jboolean jsnapshot_consistency,
+    jboolean jallow_global_seqno, jboolean jallow_blocking_flush) {
+  auto* options = new ROCKSDB_NAMESPACE::IngestExternalFileOptions();
+  options->move_files = static_cast<bool>(jmove_files);
+  options->snapshot_consistency = static_cast<bool>(jsnapshot_consistency);
+  options->allow_global_seqno = static_cast<bool>(jallow_global_seqno);
+  options->allow_blocking_flush = static_cast<bool>(jallow_blocking_flush);
+  return GET_CPLUSPLUS_POINTER(options);
+}
+
+/*
+ * Class:     org_rocksdb_IngestExternalFileOptions
+ * Method:    moveFiles
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_IngestExternalFileOptions_moveFiles(JNIEnv*, jobject,
+                                                              jlong jhandle) {
+  auto* options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+  return static_cast<jboolean>(options->move_files);
+}
+
+/*
+ * Class:     org_rocksdb_IngestExternalFileOptions
+ * Method:    setMoveFiles
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_IngestExternalFileOptions_setMoveFiles(
+    JNIEnv*, jobject, jlong jhandle, jboolean jmove_files) {
+  auto* options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::IngestExternalFileOptions*>(jhandle);
+  options->move_files = static_cast<bool>(jmove_files);
+}
+
+/*
+ * Class:     org_rocksdb_IngestExternalFileOptions
+ * Method:    snapshotConsistency
+ * Signature: (J)Z
+ */
+jboolean
Java_org_rocksdb_IngestExternalFileOptions_snapshotConsistency( + JNIEnv*, jobject, jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->snapshot_consistency); +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: setSnapshotConsistency + * Signature: (JZ)V + */ +void Java_org_rocksdb_IngestExternalFileOptions_setSnapshotConsistency( + JNIEnv*, jobject, jlong jhandle, jboolean jsnapshot_consistency) { + auto* options = + reinterpret_cast(jhandle); + options->snapshot_consistency = static_cast(jsnapshot_consistency); +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: allowGlobalSeqNo + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_IngestExternalFileOptions_allowGlobalSeqNo( + JNIEnv*, jobject, jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->allow_global_seqno); +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: setAllowGlobalSeqNo + * Signature: (JZ)V + */ +void Java_org_rocksdb_IngestExternalFileOptions_setAllowGlobalSeqNo( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_global_seqno) { + auto* options = + reinterpret_cast(jhandle); + options->allow_global_seqno = static_cast(jallow_global_seqno); +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: allowBlockingFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_IngestExternalFileOptions_allowBlockingFlush( + JNIEnv*, jobject, jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return static_cast(options->allow_blocking_flush); +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: setAllowBlockingFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_IngestExternalFileOptions_setAllowBlockingFlush( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_blocking_flush) { + auto* options = + reinterpret_cast(jhandle); + options->allow_blocking_flush = static_cast(jallow_blocking_flush); +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: ingestBehind + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_IngestExternalFileOptions_ingestBehind( + JNIEnv*, jobject, jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return options->ingest_behind == JNI_TRUE; +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: setIngestBehind + * Signature: (JZ)V + */ +void Java_org_rocksdb_IngestExternalFileOptions_setIngestBehind( + JNIEnv*, jobject, jlong jhandle, jboolean jingest_behind) { + auto* options = + reinterpret_cast(jhandle); + options->ingest_behind = jingest_behind == JNI_TRUE; +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: writeGlobalSeqno + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL +Java_org_rocksdb_IngestExternalFileOptions_writeGlobalSeqno(JNIEnv*, jobject, + jlong jhandle) { + auto* options = + reinterpret_cast(jhandle); + return options->write_global_seqno == JNI_TRUE; +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: setWriteGlobalSeqno + * Signature: (JZ)V + */ +JNIEXPORT void JNICALL +Java_org_rocksdb_IngestExternalFileOptions_setWriteGlobalSeqno( + JNIEnv*, jobject, jlong jhandle, jboolean jwrite_global_seqno) { + auto* options = + reinterpret_cast(jhandle); + options->write_global_seqno = jwrite_global_seqno == JNI_TRUE; +} + +/* + * Class: org_rocksdb_IngestExternalFileOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_IngestExternalFileOptions_disposeInternal(JNIEnv*, + jobject, + jlong jhandle) { + auto* 
options = + reinterpret_cast(jhandle); + delete options; +} diff --git a/src/rocksdb/java/rocksjni/iterator.cc b/src/rocksdb/java/rocksjni/iterator.cc new file mode 100644 index 000000000..3ddb9778b --- /dev/null +++ b/src/rocksdb/java/rocksjni/iterator.cc @@ -0,0 +1,340 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::Iterator methods from Java side. + +#include "rocksdb/iterator.h" + +#include +#include +#include + +#include + +#include "include/org_rocksdb_RocksIterator.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_RocksIterator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + assert(it != nullptr); + delete it; +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: isValid0 + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_RocksIterator_isValid0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast(handle)->Valid(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seekToFirst0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_seekToFirst0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->SeekToFirst(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seekToLast0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_seekToLast0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->SeekToLast(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: next0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_next0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->Next(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: prev0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_prev0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->Prev(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: refresh0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_refresh0(JNIEnv* env, jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Status s = it->Refresh(); + + if (s.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seek0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_RocksIterator_seek0(JNIEnv* env, jobject /*jobj*/, + jlong handle, jbyteArray jtarget, + jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->Seek(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_region(seek, env, jtarget, 0, jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. + * In this case, the buffer offset of the key may be non-zero. 
+ * + * Class: org_rocksdb_RocksIterator + * Method: seek0 + * Signature: (J[BII)V + */ +void Java_org_rocksdb_RocksIterator_seekByteArray0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->Seek(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_region(seek, env, jtarget, jtarget_off, + jtarget_len); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seekDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)V + */ +void Java_org_rocksdb_RocksIterator_seekDirect0(JNIEnv* env, jobject /*jobj*/, + jlong handle, jobject jtarget, + jint jtarget_off, + jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->Seek(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seek, env, jtarget, jtarget_off, + jtarget_len); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seekForPrevDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)V + */ +void Java_org_rocksdb_RocksIterator_seekForPrevDirect0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seekPrev = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->SeekForPrev(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seekPrev, env, jtarget, jtarget_off, + jtarget_len); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seekForPrev0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_RocksIterator_seekForPrev0(JNIEnv* env, jobject /*jobj*/, + jlong handle, + jbyteArray jtarget, + jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->SeekForPrev(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_region(seek, env, jtarget, 0, jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. + * In this case, the buffer offset of the key may be non-zero. 
+ * + * Class: org_rocksdb_RocksIterator + * Method: seek0 + * Signature: (J[BII)V + */ +void Java_org_rocksdb_RocksIterator_seekForPrevByteArray0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->SeekForPrev(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_region(seek, env, jtarget, jtarget_off, + jtarget_len); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: status0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_status0(JNIEnv* env, jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Status s = it->status(); + + if (s.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: key0 + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_RocksIterator_key0(JNIEnv* env, jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice key_slice = it->key(); + + jbyteArray jkey = env->NewByteArray(static_cast(key_slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(key_slice.size()), + const_cast(reinterpret_cast(key_slice.data()))); + return jkey; +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: keyDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)I + */ +jint Java_org_rocksdb_RocksIterator_keyDirect0(JNIEnv* env, jobject /*jobj*/, + jlong handle, jobject jtarget, + jint jtarget_off, + jint jtarget_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice key_slice = it->key(); + return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, key_slice, jtarget, + jtarget_off, jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. 
+ * + * Class: org_rocksdb_RocksIterator + * Method: keyByteArray0 + * Signature: (J[BII)I + */ +jint Java_org_rocksdb_RocksIterator_keyByteArray0(JNIEnv* env, jobject /*jobj*/, + jlong handle, jbyteArray jkey, + jint jkey_off, + jint jkey_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice key_slice = it->key(); + jsize copy_size = std::min(static_cast(key_slice.size()), + static_cast(jkey_len)); + env->SetByteArrayRegion( + jkey, jkey_off, copy_size, + const_cast(reinterpret_cast(key_slice.data()))); + + return static_cast(key_slice.size()); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: value0 + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_RocksIterator_value0(JNIEnv* env, jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice value_slice = it->value(); + + jbyteArray jkeyValue = + env->NewByteArray(static_cast(value_slice.size())); + if (jkeyValue == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkeyValue, 0, static_cast(value_slice.size()), + const_cast(reinterpret_cast(value_slice.data()))); + return jkeyValue; +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: valueDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)I + */ +jint Java_org_rocksdb_RocksIterator_valueDirect0(JNIEnv* env, jobject /*jobj*/, + jlong handle, jobject jtarget, + jint jtarget_off, + jint jtarget_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice value_slice = it->value(); + return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, value_slice, jtarget, + jtarget_off, jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. + * + * Class: org_rocksdb_RocksIterator + * Method: valueByteArray0 + * Signature: (J[BII)I + */ +jint Java_org_rocksdb_RocksIterator_valueByteArray0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jvalue_target, + jint jvalue_off, jint jvalue_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice value_slice = it->value(); + jsize copy_size = std::min(static_cast(value_slice.size()), + static_cast(jvalue_len)); + env->SetByteArrayRegion( + jvalue_target, jvalue_off, copy_size, + const_cast(reinterpret_cast(value_slice.data()))); + + return static_cast(value_slice.size()); +} diff --git a/src/rocksdb/java/rocksjni/jnicallback.cc b/src/rocksdb/java/rocksjni/jnicallback.cc new file mode 100644 index 000000000..f2742cd88 --- /dev/null +++ b/src/rocksdb/java/rocksjni/jnicallback.cc @@ -0,0 +1,54 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+//
+// This file implements the callback "bridge" between Java and C++ for
+// JNI Callbacks from C++ to sub-classes of org.rocksdb.RocksCallbackObject
+
+#include "rocksjni/jnicallback.h"
+
+#include <assert.h>
+
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+JniCallback::JniCallback(JNIEnv* env, jobject jcallback_obj) {
+  // Note: jcallback_obj may be accessed by multiple threads,
+  // so we ref the jvm not the env
+  const jint rs = env->GetJavaVM(&m_jvm);
+  if (rs != JNI_OK) {
+    // exception thrown
+    return;
+  }
+
+  // Note: we may want to access the Java callback object instance
+  // across multiple method calls, so we create a global ref
+  assert(jcallback_obj != nullptr);
+  m_jcallback_obj = env->NewGlobalRef(jcallback_obj);
+  if (m_jcallback_obj == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+}
+
+JNIEnv* JniCallback::getJniEnv(jboolean* attached) const {
+  return JniUtil::getJniEnv(m_jvm, attached);
+}
+
+void JniCallback::releaseJniEnv(jboolean& attached) const {
+  JniUtil::releaseJniEnv(m_jvm, attached);
+}
+
+JniCallback::~JniCallback() {
+  jboolean attached_thread = JNI_FALSE;
+  JNIEnv* env = getJniEnv(&attached_thread);
+  assert(env != nullptr);
+
+  if (m_jcallback_obj != nullptr) {
+    env->DeleteGlobalRef(m_jcallback_obj);
+  }
+
+  releaseJniEnv(attached_thread);
+}
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/jnicallback.h b/src/rocksdb/java/rocksjni/jnicallback.h
new file mode 100644
index 000000000..a03a04128
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/jnicallback.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// JNI Callbacks from C++ to sub-classes of org.rocksdb.RocksCallbackObject
+
+#ifndef JAVA_ROCKSJNI_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include "rocksdb/rocksdb_namespace.h"
+
+namespace ROCKSDB_NAMESPACE {
+class JniCallback {
+ public:
+  JniCallback(JNIEnv* env, jobject jcallback_obj);
+  virtual ~JniCallback();
+
+  const jobject& GetJavaObject() const { return m_jcallback_obj; }
+
+ protected:
+  JavaVM* m_jvm;
+  jobject m_jcallback_obj;
+  JNIEnv* getJniEnv(jboolean* attached) const;
+  void releaseJniEnv(jboolean& attached) const;
+};
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif  // JAVA_ROCKSJNI_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/loggerjnicallback.cc b/src/rocksdb/java/rocksjni/loggerjnicallback.cc
new file mode 100644
index 000000000..aa9f95cd4
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/loggerjnicallback.cc
@@ -0,0 +1,299 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Logger.
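The JniCallback constructor above caches the JavaVM rather than the JNIEnv because a JNIEnv is only valid on the thread that obtained it, while callbacks such as the Logger bridge below may fire from arbitrary native threads. A minimal sketch of what a getJniEnv-style helper plausibly does with the cached JavaVM, using only the standard JNI invocation API; the helper name is illustrative, not the portal.h implementation:

#include <jni.h>

// Illustrative helper: obtain a JNIEnv for the current thread from a
// cached JavaVM, attaching the thread if it is not already attached.
// *attached tells the caller whether it must detach the thread later.
static JNIEnv* GetEnvForCurrentThread(JavaVM* jvm, jboolean* attached) {
  JNIEnv* env = nullptr;
  const jint rs = jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6);
  if (rs == JNI_OK) {
    *attached = JNI_FALSE;  // thread was already attached to the JVM
    return env;
  }
  if (rs == JNI_EDETACHED &&
      jvm->AttachCurrentThread(reinterpret_cast<void**>(&env), nullptr) ==
          JNI_OK) {
    *attached = JNI_TRUE;  // caller must call DetachCurrentThread() later
    return env;
  }
  return nullptr;  // GetEnv failed, e.g. unsupported JNI version
}

Pairing every successful attach with a detach, as releaseJniEnv does, matters because a native thread that stays attached pins its Java peer until the thread exits.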
+ +#include "rocksjni/loggerjnicallback.h" + +#include +#include + +#include "include/org_rocksdb_Logger.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +namespace ROCKSDB_NAMESPACE { + +LoggerJniCallback::LoggerJniCallback(JNIEnv* env, jobject jlogger) + : JniCallback(env, jlogger) { + m_jLogMethodId = LoggerJni::getLogMethodId(env); + if (m_jLogMethodId == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + jobject jdebug_level = InfoLogLevelJni::DEBUG_LEVEL(env); + if (jdebug_level == nullptr) { + // exception thrown: NoSuchFieldError, ExceptionInInitializerError + // or OutOfMemoryError + return; + } + m_jdebug_level = env->NewGlobalRef(jdebug_level); + if (m_jdebug_level == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jobject jinfo_level = InfoLogLevelJni::INFO_LEVEL(env); + if (jinfo_level == nullptr) { + // exception thrown: NoSuchFieldError, ExceptionInInitializerError + // or OutOfMemoryError + return; + } + m_jinfo_level = env->NewGlobalRef(jinfo_level); + if (m_jinfo_level == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jobject jwarn_level = InfoLogLevelJni::WARN_LEVEL(env); + if (jwarn_level == nullptr) { + // exception thrown: NoSuchFieldError, ExceptionInInitializerError + // or OutOfMemoryError + return; + } + m_jwarn_level = env->NewGlobalRef(jwarn_level); + if (m_jwarn_level == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jobject jerror_level = InfoLogLevelJni::ERROR_LEVEL(env); + if (jerror_level == nullptr) { + // exception thrown: NoSuchFieldError, ExceptionInInitializerError + // or OutOfMemoryError + return; + } + m_jerror_level = env->NewGlobalRef(jerror_level); + if (m_jerror_level == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jobject jfatal_level = InfoLogLevelJni::FATAL_LEVEL(env); + if (jfatal_level == nullptr) { + // exception thrown: NoSuchFieldError, ExceptionInInitializerError + // or OutOfMemoryError + return; + } + m_jfatal_level = env->NewGlobalRef(jfatal_level); + if (m_jfatal_level == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jobject jheader_level = InfoLogLevelJni::HEADER_LEVEL(env); + if (jheader_level == nullptr) { + // exception thrown: NoSuchFieldError, ExceptionInInitializerError + // or OutOfMemoryError + return; + } + m_jheader_level = env->NewGlobalRef(jheader_level); + if (m_jheader_level == nullptr) { + // exception thrown: OutOfMemoryError + return; + } +} + +void LoggerJniCallback::Logv(const char* /*format*/, va_list /*ap*/) { + // We implement this method because it is virtual but we don't + // use it because we need to know about the log level. 
+} + +void LoggerJniCallback::Logv(const InfoLogLevel log_level, const char* format, + va_list ap) { + if (GetInfoLogLevel() <= log_level) { + // determine InfoLogLevel java enum instance + jobject jlog_level; + switch (log_level) { + case ROCKSDB_NAMESPACE::InfoLogLevel::DEBUG_LEVEL: + jlog_level = m_jdebug_level; + break; + case ROCKSDB_NAMESPACE::InfoLogLevel::INFO_LEVEL: + jlog_level = m_jinfo_level; + break; + case ROCKSDB_NAMESPACE::InfoLogLevel::WARN_LEVEL: + jlog_level = m_jwarn_level; + break; + case ROCKSDB_NAMESPACE::InfoLogLevel::ERROR_LEVEL: + jlog_level = m_jerror_level; + break; + case ROCKSDB_NAMESPACE::InfoLogLevel::FATAL_LEVEL: + jlog_level = m_jfatal_level; + break; + case ROCKSDB_NAMESPACE::InfoLogLevel::HEADER_LEVEL: + jlog_level = m_jheader_level; + break; + default: + jlog_level = m_jfatal_level; + break; + } + + assert(format != nullptr); + const std::unique_ptr msg = format_str(format, ap); + + // pass msg to java callback handler + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + jstring jmsg = env->NewStringUTF(msg.get()); + if (jmsg == nullptr) { + // unable to construct string + if (env->ExceptionCheck()) { + env->ExceptionDescribe(); // print out exception to stderr + } + releaseJniEnv(attached_thread); + return; + } + if (env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + env->ExceptionDescribe(); // print out exception to stderr + env->DeleteLocalRef(jmsg); + releaseJniEnv(attached_thread); + return; + } + + env->CallVoidMethod(m_jcallback_obj, m_jLogMethodId, jlog_level, jmsg); + if (env->ExceptionCheck()) { + // exception thrown + env->ExceptionDescribe(); // print out exception to stderr + env->DeleteLocalRef(jmsg); + releaseJniEnv(attached_thread); + return; + } + + env->DeleteLocalRef(jmsg); + releaseJniEnv(attached_thread); + } +} + +std::unique_ptr LoggerJniCallback::format_str(const char* format, + va_list ap) const { + va_list ap_copy; + + va_copy(ap_copy, ap); + const size_t required = + vsnprintf(nullptr, 0, format, ap_copy) + 1; // Extra space for '\0' + va_end(ap_copy); + + std::unique_ptr buf(new char[required]); + + va_copy(ap_copy, ap); + vsnprintf(buf.get(), required, format, ap_copy); + va_end(ap_copy); + + return buf; +} +LoggerJniCallback::~LoggerJniCallback() { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + assert(env != nullptr); + + if (m_jdebug_level != nullptr) { + env->DeleteGlobalRef(m_jdebug_level); + } + + if (m_jinfo_level != nullptr) { + env->DeleteGlobalRef(m_jinfo_level); + } + + if (m_jwarn_level != nullptr) { + env->DeleteGlobalRef(m_jwarn_level); + } + + if (m_jerror_level != nullptr) { + env->DeleteGlobalRef(m_jerror_level); + } + + if (m_jfatal_level != nullptr) { + env->DeleteGlobalRef(m_jfatal_level); + } + + if (m_jheader_level != nullptr) { + env->DeleteGlobalRef(m_jheader_level); + } + + releaseJniEnv(attached_thread); +} + +} // namespace ROCKSDB_NAMESPACE + +/* + * Class: org_rocksdb_Logger + * Method: createNewLoggerOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_Logger_createNewLoggerOptions(JNIEnv* env, jobject jobj, + jlong joptions) { + auto* sptr_logger = new std::shared_ptr( + new ROCKSDB_NAMESPACE::LoggerJniCallback(env, jobj)); + + // set log level + auto* options = reinterpret_cast(joptions); + sptr_logger->get()->SetInfoLogLevel(options->info_log_level); + + return GET_CPLUSPLUS_POINTER(sptr_logger); +} + +/* + * Class: org_rocksdb_Logger + * Method: createNewLoggerDbOptions + 
* Signature: (J)J + */ +jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(JNIEnv* env, + jobject jobj, + jlong jdb_options) { + auto* sptr_logger = new std::shared_ptr( + new ROCKSDB_NAMESPACE::LoggerJniCallback(env, jobj)); + + // set log level + auto* db_options = + reinterpret_cast(jdb_options); + sptr_logger->get()->SetInfoLogLevel(db_options->info_log_level); + + return GET_CPLUSPLUS_POINTER(sptr_logger); +} + +/* + * Class: org_rocksdb_Logger + * Method: setInfoLogLevel + * Signature: (JB)V + */ +void Java_org_rocksdb_Logger_setInfoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle, jbyte jlog_level) { + auto* handle = + reinterpret_cast*>( + jhandle); + handle->get()->SetInfoLogLevel( + static_cast(jlog_level)); +} + +/* + * Class: org_rocksdb_Logger + * Method: infoLogLevel + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Logger_infoLogLevel(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle) { + auto* handle = + reinterpret_cast*>( + jhandle); + return static_cast(handle->get()->GetInfoLogLevel()); +} + +/* + * Class: org_rocksdb_Logger + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_Logger_disposeInternal(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle) { + auto* handle = + reinterpret_cast*>( + jhandle); + delete handle; // delete std::shared_ptr +} diff --git a/src/rocksdb/java/rocksjni/loggerjnicallback.h b/src/rocksdb/java/rocksjni/loggerjnicallback.h new file mode 100644 index 000000000..57774988c --- /dev/null +++ b/src/rocksdb/java/rocksjni/loggerjnicallback.h @@ -0,0 +1,51 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::Logger + +#ifndef JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_ +#define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_ + +#include + +#include +#include + +#include "port/port.h" +#include "rocksdb/env.h" +#include "rocksjni/jnicallback.h" + +namespace ROCKSDB_NAMESPACE { + +class LoggerJniCallback : public JniCallback, public Logger { + public: + LoggerJniCallback(JNIEnv* env, jobject jLogger); + ~LoggerJniCallback(); + + using Logger::GetInfoLogLevel; + using Logger::SetInfoLogLevel; + // Write an entry to the log file with the specified format. + virtual void Logv(const char* format, va_list ap); + // Write an entry to the log file with the specified log level + // and format. Any log with level under the internal log level + // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be + // printed. + virtual void Logv(const InfoLogLevel log_level, const char* format, + va_list ap); + + private: + jmethodID m_jLogMethodId; + jobject m_jdebug_level; + jobject m_jinfo_level; + jobject m_jwarn_level; + jobject m_jerror_level; + jobject m_jfatal_level; + jobject m_jheader_level; + std::unique_ptr format_str(const char* format, va_list ap) const; +}; +} // namespace ROCKSDB_NAMESPACE + +#endif // JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_ diff --git a/src/rocksdb/java/rocksjni/lru_cache.cc b/src/rocksdb/java/rocksjni/lru_cache.cc new file mode 100644 index 000000000..56dffa2f0 --- /dev/null +++ b/src/rocksdb/java/rocksjni/lru_cache.cc @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::LRUCache.
+
+#include "cache/lru_cache.h"
+
+#include <jni.h>
+
+#include "include/org_rocksdb_LRUCache.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+
+/*
+ * Class: org_rocksdb_LRUCache
+ * Method: newLRUCache
+ * Signature: (JIZDD)J
+ */
+jlong Java_org_rocksdb_LRUCache_newLRUCache(JNIEnv* /*env*/, jclass /*jcls*/,
+                                            jlong jcapacity,
+                                            jint jnum_shard_bits,
+                                            jboolean jstrict_capacity_limit,
+                                            jdouble jhigh_pri_pool_ratio,
+                                            jdouble jlow_pri_pool_ratio) {
+  auto* sptr_lru_cache = new std::shared_ptr<ROCKSDB_NAMESPACE::Cache>(
+      ROCKSDB_NAMESPACE::NewLRUCache(
+          static_cast<size_t>(jcapacity), static_cast<int>(jnum_shard_bits),
+          static_cast<bool>(jstrict_capacity_limit),
+          static_cast<double>(jhigh_pri_pool_ratio),
+          nullptr /* memory_allocator */, rocksdb::kDefaultToAdaptiveMutex,
+          rocksdb::kDefaultCacheMetadataChargePolicy,
+          static_cast<double>(jlow_pri_pool_ratio)));
+  return GET_CPLUSPLUS_POINTER(sptr_lru_cache);
+}
+
+/*
+ * Class: org_rocksdb_LRUCache
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_LRUCache_disposeInternal(JNIEnv* /*env*/,
+                                               jobject /*jobj*/,
+                                               jlong jhandle) {
+  auto* sptr_lru_cache =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>(jhandle);
+  delete sptr_lru_cache;  // delete std::shared_ptr
+}
diff --git a/src/rocksdb/java/rocksjni/memory_util.cc b/src/rocksdb/java/rocksjni/memory_util.cc
new file mode 100644
index 000000000..c87c4f403
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/memory_util.cc
@@ -0,0 +1,100 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
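The LRUCache functions above follow the handle convention used by every shared-ownership bridge in this patch: a std::shared_ptr is heap-allocated, its address is handed to Java as an opaque jlong, and disposeInternal later deletes only that wrapper. A self-contained sketch of the round trip, with a placeholder type standing in for ROCKSDB_NAMESPACE::Cache:

#include <cstdint>
#include <memory>

struct Cache {};  // placeholder for ROCKSDB_NAMESPACE::Cache

// "newLRUCache" side: heap-allocate a shared_ptr so the Java object can
// hold one strong reference through an opaque 64-bit handle.
int64_t NewHandle() {
  auto* sptr = new std::shared_ptr<Cache>(std::make_shared<Cache>());
  return reinterpret_cast<int64_t>(sptr);  // what GET_CPLUSPLUS_POINTER yields
}

// "disposeInternal" side: deleting the wrapper drops only Java's
// reference; the Cache itself is freed once no other shared_ptr remains.
void DisposeHandle(int64_t handle) {
  delete reinterpret_cast<std::shared_ptr<Cache>*>(handle);
}

This is why disposing a Java LRUCache is safe while a live Options still references the same cache: the underlying object survives until the last shared_ptr copy goes away.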
+ +#include "rocksdb/utilities/memory_util.h" + +#include + +#include +#include +#include +#include + +#include "include/org_rocksdb_MemoryUtil.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_MemoryUtil + * Method: getApproximateMemoryUsageByType + * Signature: ([J[J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_MemoryUtil_getApproximateMemoryUsageByType( + JNIEnv *env, jclass, jlongArray jdb_handles, jlongArray jcache_handles) { + jboolean has_exception = JNI_FALSE; + std::vector dbs = + ROCKSDB_NAMESPACE::JniUtil::fromJPointers( + env, jdb_handles, &has_exception); + if (has_exception == JNI_TRUE) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + std::unordered_set cache_set; + jsize cache_handle_count = env->GetArrayLength(jcache_handles); + if (cache_handle_count > 0) { + jlong *ptr_jcache_handles = + env->GetLongArrayElements(jcache_handles, nullptr); + if (ptr_jcache_handles == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + for (jsize i = 0; i < cache_handle_count; i++) { + auto *cache_ptr = + reinterpret_cast *>( + ptr_jcache_handles[i]); + cache_set.insert(cache_ptr->get()); + } + env->ReleaseLongArrayElements(jcache_handles, ptr_jcache_handles, + JNI_ABORT); + } + + std::map usage_by_type; + if (ROCKSDB_NAMESPACE::MemoryUtil::GetApproximateMemoryUsageByType( + dbs, cache_set, &usage_by_type) != ROCKSDB_NAMESPACE::Status::OK()) { + // Non-OK status + return nullptr; + } + + jobject jusage_by_type = ROCKSDB_NAMESPACE::HashMapJni::construct( + env, static_cast(usage_by_type.size())); + if (jusage_by_type == nullptr) { + // exception occurred + return nullptr; + } + const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV< + const ROCKSDB_NAMESPACE::MemoryUtil::UsageType, const uint64_t, jobject, + jobject> + fn_map_kv = [env]( + const std::pair &pair) { + // Construct key + const jobject jusage_type = ROCKSDB_NAMESPACE::ByteJni::valueOf( + env, ROCKSDB_NAMESPACE::MemoryUsageTypeJni::toJavaMemoryUsageType( + pair.first)); + if (jusage_type == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + // Construct value + const jobject jusage_value = + ROCKSDB_NAMESPACE::LongJni::valueOf(env, pair.second); + if (jusage_value == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + // Construct and return pointer to pair of jobjects + return std::unique_ptr>( + new std::pair(jusage_type, jusage_value)); + }; + + if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(env, jusage_by_type, + usage_by_type.begin(), + usage_by_type.end(), fn_map_kv)) { + // exception occcurred + jusage_by_type = nullptr; + } + + return jusage_by_type; +} diff --git a/src/rocksdb/java/rocksjni/memtablejni.cc b/src/rocksdb/java/rocksjni/memtablejni.cc new file mode 100644 index 000000000..a4d02f354 --- /dev/null +++ b/src/rocksdb/java/rocksjni/memtablejni.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for MemTables. 
+ +#include "include/org_rocksdb_HashLinkedListMemTableConfig.h" +#include "include/org_rocksdb_HashSkipListMemTableConfig.h" +#include "include/org_rocksdb_SkipListMemTableConfig.h" +#include "include/org_rocksdb_VectorMemTableConfig.h" +#include "rocksdb/memtablerep.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_HashSkipListMemTableConfig + * Method: newMemTableFactoryHandle + * Signature: (JII)J + */ +jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle( + JNIEnv* env, jobject /*jobj*/, jlong jbucket_count, jint jheight, + jint jbranching_factor) { + ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jbucket_count); + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory( + static_cast(jbucket_count), static_cast(jheight), + static_cast(jbranching_factor))); + } + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + return 0; +} + +/* + * Class: org_rocksdb_HashLinkedListMemTableConfig + * Method: newMemTableFactoryHandle + * Signature: (JJIZI)J + */ +jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle( + JNIEnv* env, jobject /*jobj*/, jlong jbucket_count, + jlong jhuge_page_tlb_size, jint jbucket_entries_logging_threshold, + jboolean jif_log_bucket_dist_when_flash, jint jthreshold_use_skiplist) { + ROCKSDB_NAMESPACE::Status statusBucketCount = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jbucket_count); + ROCKSDB_NAMESPACE::Status statusHugePageTlb = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + jhuge_page_tlb_size); + if (statusBucketCount.ok() && statusHugePageTlb.ok()) { + return GET_CPLUSPLUS_POINTER(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory( + static_cast(jbucket_count), + static_cast(jhuge_page_tlb_size), + static_cast(jbucket_entries_logging_threshold), + static_cast(jif_log_bucket_dist_when_flash), + static_cast(jthreshold_use_skiplist))); + } + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew( + env, !statusBucketCount.ok() ? statusBucketCount : statusHugePageTlb); + return 0; +} + +/* + * Class: org_rocksdb_VectorMemTableConfig + * Method: newMemTableFactoryHandle + * Signature: (J)J + */ +jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle( + JNIEnv* env, jobject /*jobj*/, jlong jreserved_size) { + ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jreserved_size); + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(new ROCKSDB_NAMESPACE::VectorRepFactory( + static_cast(jreserved_size))); + } + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + return 0; +} + +/* + * Class: org_rocksdb_SkipListMemTableConfig + * Method: newMemTableFactoryHandle0 + * Signature: (J)J + */ +jlong Java_org_rocksdb_SkipListMemTableConfig_newMemTableFactoryHandle0( + JNIEnv* env, jobject /*jobj*/, jlong jlookahead) { + ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jlookahead); + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(new ROCKSDB_NAMESPACE::SkipListFactory( + static_cast(jlookahead))); + } + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + return 0; +} diff --git a/src/rocksdb/java/rocksjni/merge_operator.cc b/src/rocksdb/java/rocksjni/merge_operator.cc new file mode 100644 index 000000000..ce3c5df56 --- /dev/null +++ b/src/rocksdb/java/rocksjni/merge_operator.cc @@ -0,0 +1,98 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved. +// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for ROCKSDB_NAMESPACE::MergeOperator. + +#include "rocksdb/merge_operator.h" + +#include +#include +#include + +#include +#include + +#include "include/org_rocksdb_StringAppendOperator.h" +#include "include/org_rocksdb_UInt64AddOperator.h" +#include "rocksdb/db.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/options.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/statistics.h" +#include "rocksdb/table.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" +#include "utilities/merge_operators.h" + +/* + * Class: org_rocksdb_StringAppendOperator + * Method: newSharedStringAppendOperator + * Signature: (C)J + */ +jlong Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator__C( + JNIEnv* /*env*/, jclass /*jclazz*/, jchar jdelim) { + auto* sptr_string_append_op = + new std::shared_ptr( + ROCKSDB_NAMESPACE::MergeOperators::CreateStringAppendOperator( + (char)jdelim)); + return GET_CPLUSPLUS_POINTER(sptr_string_append_op); +} + +jlong Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator__Ljava_lang_String_2( + JNIEnv* env, jclass /*jclass*/, jstring jdelim) { + jboolean has_exception = JNI_FALSE; + auto delim = + ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jdelim, &has_exception); + if (has_exception == JNI_TRUE) { + return 0; + } + auto* sptr_string_append_op = + new std::shared_ptr( + ROCKSDB_NAMESPACE::MergeOperators::CreateStringAppendOperator(delim)); + return GET_CPLUSPLUS_POINTER(sptr_string_append_op); +} + +/* + * Class: org_rocksdb_StringAppendOperator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_StringAppendOperator_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* sptr_string_append_op = + reinterpret_cast*>( + jhandle); + delete sptr_string_append_op; // delete std::shared_ptr +} + +/* + * Class: org_rocksdb_UInt64AddOperator + * Method: newSharedUInt64AddOperator + * Signature: ()J + */ +jlong Java_org_rocksdb_UInt64AddOperator_newSharedUInt64AddOperator( + JNIEnv* /*env*/, jclass /*jclazz*/) { + auto* sptr_uint64_add_op = + new std::shared_ptr( + ROCKSDB_NAMESPACE::MergeOperators::CreateUInt64AddOperator()); + return GET_CPLUSPLUS_POINTER(sptr_uint64_add_op); +} + +/* + * Class: org_rocksdb_UInt64AddOperator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_UInt64AddOperator_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* sptr_uint64_add_op = + reinterpret_cast*>( + jhandle); + delete sptr_uint64_add_op; // delete std::shared_ptr +} diff --git a/src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc b/src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc new file mode 100644 index 000000000..ac33ca22d --- /dev/null +++ b/src/rocksdb/java/rocksjni/native_comparator_wrapper_test.cc @@ -0,0 +1,45 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
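The paired newSharedStringAppendOperator exports in merge_operator.cc above show how JNI disambiguates overloaded native methods: each exported symbol gains a double-underscore suffix encoding its parameter descriptor. The declarations below illustrate the correspondence; the Java signatures in the comments are assumed from the RocksJava API:

#include <jni.h>

// Java: private static native long newSharedStringAppendOperator(char delim);
// The descriptor "C" encodes char, so the symbol gains the suffix __C:
extern "C" jlong
Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator__C(
    JNIEnv*, jclass, jchar);

// Java: private static native long newSharedStringAppendOperator(String delim);
// "Ljava/lang/String;" is mangled with '/' -> '_' and ';' -> '_2':
extern "C" jlong
Java_org_rocksdb_StringAppendOperator_newSharedStringAppendOperator__Ljava_lang_String_2(
    JNIEnv*, jclass, jstring);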
+
+#include <jni.h>
+
+#include <string>
+
+#include "include/org_rocksdb_NativeComparatorWrapperTest_NativeStringComparatorWrapper.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/slice.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class NativeComparatorWrapperTestStringComparator : public Comparator {
+  const char* Name() const override {
+    return "NativeComparatorWrapperTestStringComparator";
+  }
+
+  int Compare(const Slice& a, const Slice& b) const override {
+    return a.ToString().compare(b.ToString());
+  }
+
+  void FindShortestSeparator(std::string* /*start*/,
+                             const Slice& /*limit*/) const override {
+    return;
+  }
+
+  void FindShortSuccessor(std::string* /*key*/) const override { return; }
+};
+}  // namespace ROCKSDB_NAMESPACE
+
+/*
+ * Class: org_rocksdb_NativeComparatorWrapperTest_NativeStringComparatorWrapper
+ * Method: newStringComparator
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_NativeComparatorWrapperTest_00024NativeStringComparatorWrapper_newStringComparator(
+    JNIEnv* /*env*/, jobject /*jobj*/) {
+  auto* comparator =
+      new ROCKSDB_NAMESPACE::NativeComparatorWrapperTestStringComparator();
+  return GET_CPLUSPLUS_POINTER(comparator);
+}
diff --git a/src/rocksdb/java/rocksjni/optimistic_transaction_db.cc b/src/rocksdb/java/rocksjni/optimistic_transaction_db.cc
new file mode 100644
index 000000000..238224f58
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/optimistic_transaction_db.cc
@@ -0,0 +1,270 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::OptimisticTransactionDB.
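The test comparator above builds a std::string copy of both keys on every Compare call, which is acceptable in a test fixture but is exactly what production comparators avoid by comparing the Slice bytes in place. A sketch of the equivalent allocation-free comparison, matching the memcmp-then-length semantics of rocksdb::Slice::compare:

#include <algorithm>
#include <cstring>

// Lexicographic byte-wise comparison without materializing std::string
// copies of the keys.
int CompareBytes(const char* a, size_t alen, const char* b, size_t blen) {
  const size_t min_len = std::min(alen, blen);
  int r = memcmp(a, b, min_len);
  if (r == 0) {
    // shared prefix: the shorter key orders first
    if (alen < blen) {
      r = -1;
    } else if (alen > blen) {
      r = +1;
    }
  }
  return r;
}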
+ +#include "rocksdb/utilities/optimistic_transaction_db.h" + +#include + +#include "include/org_rocksdb_OptimisticTransactionDB.h" +#include "rocksdb/options.h" +#include "rocksdb/utilities/transaction.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: open + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2( + JNIEnv* env, jclass, jlong joptions_handle, jstring jdb_path) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + auto* options = + reinterpret_cast(joptions_handle); + ROCKSDB_NAMESPACE::OptimisticTransactionDB* otdb = nullptr; + ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::OptimisticTransactionDB::Open(*options, db_path, + &otdb); + env->ReleaseStringUTFChars(jdb_path, db_path); + + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(otdb); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: open + * Signature: (JLjava/lang/String;[[B[J)[J + */ +jlongArray +Java_org_rocksdb_OptimisticTransactionDB_open__JLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass, jlong jdb_options_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options_handles) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + std::vector column_families; + const jsize len_cols = env->GetArrayLength(jcolumn_names); + if (len_cols > 0) { + jlong* jco = env->GetLongArrayElements(jcolumn_options_handles, nullptr); + if (jco == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + for (int i = 0; i < len_cols; i++) { + const jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + const jbyteArray jcn_ba = reinterpret_cast(jcn); + const jsize jcf_name_len = env->GetArrayLength(jcn_ba); + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, nullptr); + if (jcf_name == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jcn); + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + const std::string cf_name(reinterpret_cast(jcf_name), + jcf_name_len); + const ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + } + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + } + + auto* db_options = + reinterpret_cast(jdb_options_handle); + std::vector handles; + ROCKSDB_NAMESPACE::OptimisticTransactionDB* otdb = nullptr; + const ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::OptimisticTransactionDB::Open( + *db_options, db_path, column_families, &handles, &otdb); + + env->ReleaseStringUTFChars(jdb_path, db_path); + + // check if open operation was successful + if (s.ok()) { + const 
jsize resultsLen = 1 + len_cols; // db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = reinterpret_cast(otdb); + for (int i = 1; i <= len_cols; i++) { + results[i] = reinterpret_cast(handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return nullptr; + } + return jresults; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_OptimisticTransactionDB_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + assert(optimistic_txn_db != nullptr); + delete optimistic_txn_db; +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: closeDatabase + * Signature: (J)V + */ +void Java_org_rocksdb_OptimisticTransactionDB_closeDatabase(JNIEnv* env, jclass, + jlong jhandle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + assert(optimistic_txn_db != nullptr); + ROCKSDB_NAMESPACE::Status s = optimistic_txn_db->Close(); + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: beginTransaction + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJ( + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + ROCKSDB_NAMESPACE::Transaction* txn = + optimistic_txn_db->BeginTransaction(*write_options); + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: beginTransaction + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction__JJJ( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jwrite_options_handle, jlong joptimistic_txn_options_handle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* optimistic_txn_options = + reinterpret_cast( + joptimistic_txn_options_handle); + ROCKSDB_NAMESPACE::Transaction* txn = optimistic_txn_db->BeginTransaction( + *write_options, *optimistic_txn_options); + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: org_rocksdb_OptimisticTransactionDB + * Method: beginTransaction_withOld + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJ( + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle, + jlong jold_txn_handle) { + auto* optimistic_txn_db = + reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* old_txn = + reinterpret_cast(jold_txn_handle); + ROCKSDB_NAMESPACE::OptimisticTransactionOptions optimistic_txn_options; + ROCKSDB_NAMESPACE::Transaction* txn = optimistic_txn_db->BeginTransaction( + *write_options, optimistic_txn_options, old_txn); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_optimistic_txn + assert(txn == old_txn); + + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: 
org_rocksdb_OptimisticTransactionDB
+ * Method: beginTransaction_withOld
+ * Signature: (JJJJ)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_beginTransaction_1withOld__JJJJ(
+    JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle,
+    jlong joptimistic_txn_options_handle, jlong jold_txn_handle) {
+  auto* optimistic_txn_db =
+      reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+  auto* write_options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::WriteOptions*>(jwrite_options_handle);
+  auto* optimistic_txn_options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionOptions*>(
+          joptimistic_txn_options_handle);
+  auto* old_txn =
+      reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jold_txn_handle);
+  ROCKSDB_NAMESPACE::Transaction* txn = optimistic_txn_db->BeginTransaction(
+      *write_options, *optimistic_txn_options, old_txn);
+
+  // RocksJava relies on the assumption that
+  // we do not allocate a new Transaction object
+  // when providing an old_optimistic_txn
+  assert(txn == old_txn);
+
+  return GET_CPLUSPLUS_POINTER(txn);
+}
+
+/*
+ * Class: org_rocksdb_OptimisticTransactionDB
+ * Method: getBaseDB
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_OptimisticTransactionDB_getBaseDB(JNIEnv*, jobject,
+                                                         jlong jhandle) {
+  auto* optimistic_txn_db =
+      reinterpret_cast<ROCKSDB_NAMESPACE::OptimisticTransactionDB*>(jhandle);
+  return GET_CPLUSPLUS_POINTER(optimistic_txn_db->GetBaseDB());
+}
diff --git a/src/rocksdb/java/rocksjni/optimistic_transaction_options.cc b/src/rocksdb/java/rocksjni/optimistic_transaction_options.cc
new file mode 100644
index 000000000..501c6c4fb
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/optimistic_transaction_options.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::OptimisticTransactionOptions.
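The beginTransaction_withOld entry points above exist so RocksJava can recycle one native Transaction across many logical transactions: passing a non-null old_txn makes BeginTransaction reinitialize that object in place instead of allocating, which is the invariant the assert(txn == old_txn) checks. A minimal C++ sketch of the same reuse against the public API; keys and values are illustrative:

#include <cassert>

#include "rocksdb/utilities/optimistic_transaction_db.h"

using namespace ROCKSDB_NAMESPACE;

void ReuseTransaction(OptimisticTransactionDB* otdb) {
  WriteOptions write_options;
  OptimisticTransactionOptions txn_options;

  // The first call allocates a Transaction object...
  Transaction* txn = otdb->BeginTransaction(write_options, txn_options);
  txn->Put("key1", "value1");
  txn->Commit();

  // ...subsequent calls that pass it back reinitialize and return the
  // same object, so no per-transaction allocation occurs.
  Transaction* txn2 = otdb->BeginTransaction(write_options, txn_options, txn);
  assert(txn2 == txn);
  txn2->Put("key2", "value2");
  txn2->Commit();
  delete txn2;  // the caller owns the Transaction
}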
+ +#include + +#include "include/org_rocksdb_OptimisticTransactionOptions.h" +#include "rocksdb/comparator.h" +#include "rocksdb/utilities/optimistic_transaction_db.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: newOptimisticTransactionOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_OptimisticTransactionOptions_newOptimisticTransactionOptions( + JNIEnv* /*env*/, jclass /*jcls*/) { + ROCKSDB_NAMESPACE::OptimisticTransactionOptions* opts = + new ROCKSDB_NAMESPACE::OptimisticTransactionOptions(); + return GET_CPLUSPLUS_POINTER(opts); +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: isSetSnapshot + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_OptimisticTransactionOptions_isSetSnapshot( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* opts = + reinterpret_cast( + jhandle); + return opts->set_snapshot; +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: setSetSnapshot + * Signature: (JZ)V + */ +void Java_org_rocksdb_OptimisticTransactionOptions_setSetSnapshot( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean jset_snapshot) { + auto* opts = + reinterpret_cast( + jhandle); + opts->set_snapshot = jset_snapshot; +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: setComparator + * Signature: (JJ)V + */ +void Java_org_rocksdb_OptimisticTransactionOptions_setComparator( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jcomparator_handle) { + auto* opts = + reinterpret_cast( + jhandle); + opts->cmp = + reinterpret_cast(jcomparator_handle); +} + +/* + * Class: org_rocksdb_OptimisticTransactionOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_OptimisticTransactionOptions_disposeInternal( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + delete reinterpret_cast( + jhandle); +} diff --git a/src/rocksdb/java/rocksjni/options.cc b/src/rocksdb/java/rocksjni/options.cc new file mode 100644 index 000000000..b848ea9cf --- /dev/null +++ b/src/rocksdb/java/rocksjni/options.cc @@ -0,0 +1,8687 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::Options. 
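The Options bridge that follows is by far the largest file in the patch because it mirrors setters and getters for DBOptions, ColumnFamilyOptions, and the combined Options class; its two-argument newOptions overload relies on the public constructor that composes an Options from the other two. A short sketch of that composition on the C++ side; the field values are illustrative:

#include "rocksdb/options.h"

using namespace ROCKSDB_NAMESPACE;

Options ComposeOptions() {
  DBOptions db_opts;  // DB-wide settings
  db_opts.create_if_missing = true;

  ColumnFamilyOptions cf_opts;  // per-column-family settings
  cf_opts.write_buffer_size = 64 << 20;  // 64 MiB memtable

  // Options(const DBOptions&, const ColumnFamilyOptions&) is the same
  // constructor invoked by Java_org_rocksdb_Options_newOptions__JJ below.
  return Options(db_opts, cf_opts);
}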
+ +#include "rocksdb/options.h" + +#include +#include +#include + +#include +#include + +#include "include/org_rocksdb_ColumnFamilyOptions.h" +#include "include/org_rocksdb_ComparatorOptions.h" +#include "include/org_rocksdb_DBOptions.h" +#include "include/org_rocksdb_FlushOptions.h" +#include "include/org_rocksdb_Options.h" +#include "include/org_rocksdb_ReadOptions.h" +#include "include/org_rocksdb_WriteOptions.h" +#include "rocksdb/comparator.h" +#include "rocksdb/convenience.h" +#include "rocksdb/db.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/rate_limiter.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/sst_partitioner.h" +#include "rocksdb/statistics.h" +#include "rocksdb/table.h" +#include "rocksjni/comparatorjnicallback.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" +#include "rocksjni/statisticsjni.h" +#include "rocksjni/table_filter_jnicallback.h" +#include "utilities/merge_operators.h" + +/* + * Class: org_rocksdb_Options + * Method: newOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_Options_newOptions__(JNIEnv*, jclass) { + auto* op = new ROCKSDB_NAMESPACE::Options(); + return GET_CPLUSPLUS_POINTER(op); +} + +/* + * Class: org_rocksdb_Options + * Method: newOptions + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv*, jclass, jlong jdboptions, + jlong jcfoptions) { + auto* dbOpt = + reinterpret_cast(jdboptions); + auto* cfOpt = reinterpret_cast( + jcfoptions); + auto* op = new ROCKSDB_NAMESPACE::Options(*dbOpt, *cfOpt); + return GET_CPLUSPLUS_POINTER(op); +} + +/* + * Class: org_rocksdb_Options + * Method: copyOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_copyOptions(JNIEnv*, jclass, jlong jhandle) { + auto new_opt = new ROCKSDB_NAMESPACE::Options( + *(reinterpret_cast(jhandle))); + return GET_CPLUSPLUS_POINTER(new_opt); +} + +/* + * Class: org_rocksdb_Options + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_Options_disposeInternal(JNIEnv*, jobject, jlong handle) { + auto* op = reinterpret_cast(handle); + assert(op != nullptr); + delete op; +} + +/* + * Class: org_rocksdb_Options + * Method: setIncreaseParallelism + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setIncreaseParallelism(JNIEnv*, jobject, + jlong jhandle, + jint totalThreads) { + reinterpret_cast(jhandle)->IncreaseParallelism( + static_cast(totalThreads)); +} + +/* + * Class: org_rocksdb_Options + * Method: setCreateIfMissing + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setCreateIfMissing(JNIEnv*, jobject, + jlong jhandle, jboolean flag) { + reinterpret_cast(jhandle)->create_if_missing = + flag; +} + +/* + * Class: org_rocksdb_Options + * Method: createIfMissing + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_createIfMissing(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->create_if_missing; +} + +/* + * Class: org_rocksdb_Options + * Method: setCreateMissingColumnFamilies + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setCreateMissingColumnFamilies(JNIEnv*, jobject, + jlong jhandle, + jboolean flag) { + reinterpret_cast(jhandle) + ->create_missing_column_families = flag; +} + +/* + * Class: org_rocksdb_Options + * Method: createMissingColumnFamilies + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_createMissingColumnFamilies(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->create_missing_column_families; +} + +/* + * Class: 
org_rocksdb_Options + * Method: setComparatorHandle + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setComparatorHandle__JI(JNIEnv*, jobject, + jlong jhandle, + jint builtinComparator) { + switch (builtinComparator) { + case 1: + reinterpret_cast(jhandle)->comparator = + ROCKSDB_NAMESPACE::ReverseBytewiseComparator(); + break; + default: + reinterpret_cast(jhandle)->comparator = + ROCKSDB_NAMESPACE::BytewiseComparator(); + break; + } +} + +/* + * Class: org_rocksdb_Options + * Method: setComparatorHandle + * Signature: (JJB)V + */ +void Java_org_rocksdb_Options_setComparatorHandle__JJB(JNIEnv*, jobject, + jlong jopt_handle, + jlong jcomparator_handle, + jbyte jcomparator_type) { + ROCKSDB_NAMESPACE::Comparator* comparator = nullptr; + switch (jcomparator_type) { + // JAVA_COMPARATOR + case 0x0: + comparator = reinterpret_cast( + jcomparator_handle); + break; + + // JAVA_NATIVE_COMPARATOR_WRAPPER + case 0x1: + comparator = + reinterpret_cast(jcomparator_handle); + break; + } + auto* opt = reinterpret_cast(jopt_handle); + opt->comparator = comparator; +} + +/* + * Class: org_rocksdb_Options + * Method: setMergeOperatorName + * Signature: (JJjava/lang/String)V + */ +void Java_org_rocksdb_Options_setMergeOperatorName(JNIEnv* env, jobject, + jlong jhandle, + jstring jop_name) { + const char* op_name = env->GetStringUTFChars(jop_name, nullptr); + if (op_name == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + auto* options = reinterpret_cast(jhandle); + options->merge_operator = + ROCKSDB_NAMESPACE::MergeOperators::CreateFromStringId(op_name); + + env->ReleaseStringUTFChars(jop_name, op_name); +} + +/* + * Class: org_rocksdb_Options + * Method: setMergeOperator + * Signature: (JJjava/lang/String)V + */ +void Java_org_rocksdb_Options_setMergeOperator(JNIEnv*, jobject, jlong jhandle, + jlong mergeOperatorHandle) { + reinterpret_cast(jhandle)->merge_operator = + *(reinterpret_cast*>( + mergeOperatorHandle)); +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionFilterHandle + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setCompactionFilterHandle( + JNIEnv*, jobject, jlong jopt_handle, jlong jcompactionfilter_handle) { + reinterpret_cast(jopt_handle) + ->compaction_filter = + reinterpret_cast( + jcompactionfilter_handle); +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionFilterFactoryHandle + * Signature: (JJ)V + */ +void JNICALL Java_org_rocksdb_Options_setCompactionFilterFactoryHandle( + JNIEnv*, jobject, jlong jopt_handle, + jlong jcompactionfilterfactory_handle) { + auto* cff_factory = reinterpret_cast< + std::shared_ptr*>( + jcompactionfilterfactory_handle); + reinterpret_cast(jopt_handle) + ->compaction_filter_factory = *cff_factory; +} + +/* + * Class: org_rocksdb_Options + * Method: setWriteBufferSize + * Signature: (JJ)I + */ +void Java_org_rocksdb_Options_setWriteBufferSize(JNIEnv* env, jobject, + jlong jhandle, + jlong jwrite_buffer_size) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + jwrite_buffer_size); + if (s.ok()) { + reinterpret_cast(jhandle)->write_buffer_size = + jwrite_buffer_size; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: setWriteBufferManager + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWriteBufferManager( + JNIEnv*, jobject, jlong joptions_handle, + jlong jwrite_buffer_manager_handle) { + auto* write_buffer_manager = + reinterpret_cast*>( + jwrite_buffer_manager_handle); + 
reinterpret_cast(joptions_handle) + ->write_buffer_manager = *write_buffer_manager; +} + +/* + * Class: org_rocksdb_Options + * Method: writeBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_writeBufferSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->write_buffer_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxWriteBufferNumber + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxWriteBufferNumber( + JNIEnv*, jobject, jlong jhandle, jint jmax_write_buffer_number) { + reinterpret_cast(jhandle) + ->max_write_buffer_number = jmax_write_buffer_number; +} + +/* + * Class: org_rocksdb_Options + * Method: setStatistics + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setStatistics(JNIEnv*, jobject, jlong jhandle, + jlong jstatistics_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* pSptr = + reinterpret_cast*>( + jstatistics_handle); + opt->statistics = *pSptr; +} + +/* + * Class: org_rocksdb_Options + * Method: statistics + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_statistics(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + std::shared_ptr sptr = opt->statistics; + if (sptr == nullptr) { + return 0; + } else { + std::shared_ptr* pSptr = + new std::shared_ptr(sptr); + return GET_CPLUSPLUS_POINTER(pSptr); + } +} + +/* + * Class: org_rocksdb_Options + * Method: maxWriteBufferNumber + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxWriteBufferNumber(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_write_buffer_number; +} + +/* + * Class: org_rocksdb_Options + * Method: errorIfExists + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_errorIfExists(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->error_if_exists; +} + +/* + * Class: org_rocksdb_Options + * Method: setErrorIfExists + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setErrorIfExists(JNIEnv*, jobject, jlong jhandle, + jboolean error_if_exists) { + reinterpret_cast(jhandle)->error_if_exists = + static_cast(error_if_exists); +} + +/* + * Class: org_rocksdb_Options + * Method: paranoidChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_paranoidChecks(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->paranoid_checks; +} + +/* + * Class: org_rocksdb_Options + * Method: setParanoidChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setParanoidChecks(JNIEnv*, jobject, jlong jhandle, + jboolean paranoid_checks) { + reinterpret_cast(jhandle)->paranoid_checks = + static_cast(paranoid_checks); +} + +/* + * Class: org_rocksdb_Options + * Method: setEnv + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setEnv(JNIEnv*, jobject, jlong jhandle, + jlong jenv) { + reinterpret_cast(jhandle)->env = + reinterpret_cast(jenv); +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxTotalWalSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxTotalWalSize(JNIEnv*, jobject, + jlong jhandle, + jlong jmax_total_wal_size) { + reinterpret_cast(jhandle)->max_total_wal_size = + static_cast(jmax_total_wal_size); +} + +/* + * Class: org_rocksdb_Options + * Method: maxTotalWalSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxTotalWalSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_total_wal_size; +} + +/* + * Class: org_rocksdb_Options + * Method: maxOpenFiles + * Signature: (J)I + */ +jint 
Java_org_rocksdb_Options_maxOpenFiles(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->max_open_files; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxOpenFiles + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxOpenFiles(JNIEnv*, jobject, jlong jhandle, + jint max_open_files) { + reinterpret_cast(jhandle)->max_open_files = + static_cast(max_open_files); +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxFileOpeningThreads + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxFileOpeningThreads( + JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) { + reinterpret_cast(jhandle) + ->max_file_opening_threads = static_cast(jmax_file_opening_threads); +} + +/* + * Class: org_rocksdb_Options + * Method: maxFileOpeningThreads + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxFileOpeningThreads(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_file_opening_threads); +} + +/* + * Class: org_rocksdb_Options + * Method: useFsync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_useFsync(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->use_fsync; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseFsync + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseFsync(JNIEnv*, jobject, jlong jhandle, + jboolean use_fsync) { + reinterpret_cast(jhandle)->use_fsync = + static_cast(use_fsync); +} + +/* + * Class: org_rocksdb_Options + * Method: setDbPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_Options_setDbPaths(JNIEnv* env, jobject, jlong jhandle, + jobjectArray jpaths, + jlongArray jtarget_sizes) { + std::vector db_paths; + jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr); + if (ptr_jtarget_size == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jboolean has_exception = JNI_FALSE; + const jsize len = env->GetArrayLength(jpaths); + for (jsize i = 0; i < len; i++) { + jobject jpath = + reinterpret_cast(env->GetObjectArrayElement(jpaths, i)); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + std::string path = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, static_cast(jpath), &has_exception); + env->DeleteLocalRef(jpath); + + if (has_exception == JNI_TRUE) { + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + + jlong jtarget_size = ptr_jtarget_size[i]; + + db_paths.push_back( + ROCKSDB_NAMESPACE::DbPath(path, static_cast(jtarget_size))); + } + + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + + auto* opt = reinterpret_cast(jhandle); + opt->db_paths = db_paths; +} + +/* + * Class: org_rocksdb_Options + * Method: dbPathsLen + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_dbPathsLen(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->db_paths.size()); +} + +/* + * Class: org_rocksdb_Options + * Method: dbPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_Options_dbPaths(JNIEnv* env, jobject, jlong jhandle, + jobjectArray jpaths, + jlongArray jtarget_sizes) { + jboolean is_copy; + jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, &is_copy); + if (ptr_jtarget_size == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + auto* opt = 
reinterpret_cast(jhandle); + const jsize len = env->GetArrayLength(jpaths); + for (jsize i = 0; i < len; i++) { + ROCKSDB_NAMESPACE::DbPath db_path = opt->db_paths[i]; + + jstring jpath = env->NewStringUTF(db_path.path.c_str()); + if (jpath == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + env->SetObjectArrayElement(jpaths, i, jpath); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jpath); + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + + ptr_jtarget_size[i] = static_cast(db_path.target_size); + } + + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, + is_copy == JNI_TRUE ? 0 : JNI_ABORT); +} + +/* + * Class: org_rocksdb_Options + * Method: dbLogDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_dbLogDir(JNIEnv* env, jobject, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle) + ->db_log_dir.c_str()); +} + +/* + * Class: org_rocksdb_Options + * Method: setDbLogDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_Options_setDbLogDir(JNIEnv* env, jobject, jlong jhandle, + jstring jdb_log_dir) { + const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr); + if (log_dir == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + reinterpret_cast(jhandle)->db_log_dir.assign( + log_dir); + env->ReleaseStringUTFChars(jdb_log_dir, log_dir); +} + +/* + * Class: org_rocksdb_Options + * Method: walDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_walDir(JNIEnv* env, jobject, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle)->wal_dir.c_str()); +} + +/* + * Class: org_rocksdb_Options + * Method: setWalDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_Options_setWalDir(JNIEnv* env, jobject, jlong jhandle, + jstring jwal_dir) { + const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr); + if (wal_dir == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + reinterpret_cast(jhandle)->wal_dir.assign( + wal_dir); + env->ReleaseStringUTFChars(jwal_dir, wal_dir); +} + +/* + * Class: org_rocksdb_Options + * Method: deleteObsoleteFilesPeriodMicros + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros; +} + +/* + * Class: org_rocksdb_Options + * Method: setDeleteObsoleteFilesPeriodMicros + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros(JNIEnv*, + jobject, + jlong jhandle, + jlong micros) { + reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros = static_cast(micros); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBackgroundCompactions + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBackgroundCompactions(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_background_compactions; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBackgroundCompactions + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBackgroundCompactions(JNIEnv*, jobject, + jlong jhandle, + jint max) { + reinterpret_cast(jhandle) + ->max_background_compactions = static_cast(max); +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxSubcompactions + * Signature: (JI)V + */ +void 
Java_org_rocksdb_Options_setMaxSubcompactions(JNIEnv*, jobject, + jlong jhandle, jint max) { + reinterpret_cast(jhandle)->max_subcompactions = + static_cast(max); +} + +/* + * Class: org_rocksdb_Options + * Method: maxSubcompactions + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxSubcompactions(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_subcompactions; +} + +/* + * Class: org_rocksdb_Options + * Method: maxBackgroundFlushes + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBackgroundFlushes(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_background_flushes; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBackgroundFlushes + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBackgroundFlushes( + JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) { + reinterpret_cast(jhandle) + ->max_background_flushes = static_cast(max_background_flushes); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBackgroundJobs + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBackgroundJobs(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_background_jobs; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBackgroundJobs + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBackgroundJobs(JNIEnv*, jobject, + jlong jhandle, + jint max_background_jobs) { + reinterpret_cast(jhandle)->max_background_jobs = + static_cast(max_background_jobs); +} + +/* + * Class: org_rocksdb_Options + * Method: maxLogFileSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxLogFileSize(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_log_file_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxLogFileSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxLogFileSize(JNIEnv* env, jobject, + jlong jhandle, + jlong max_log_file_size) { + auto s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(max_log_file_size); + if (s.ok()) { + reinterpret_cast(jhandle)->max_log_file_size = + max_log_file_size; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: logFileTimeToRoll + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_logFileTimeToRoll(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->log_file_time_to_roll; +} + +/* + * Class: org_rocksdb_Options + * Method: setLogFileTimeToRoll + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setLogFileTimeToRoll( + JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + log_file_time_to_roll); + if (s.ok()) { + reinterpret_cast(jhandle) + ->log_file_time_to_roll = log_file_time_to_roll; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: keepLogFileNum + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_keepLogFileNum(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->keep_log_file_num; +} + +/* + * Class: org_rocksdb_Options + * Method: setKeepLogFileNum + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setKeepLogFileNum(JNIEnv* env, jobject, + jlong jhandle, + jlong keep_log_file_num) { + auto s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num); + if (s.ok()) { + reinterpret_cast(jhandle)->keep_log_file_num = + 
keep_log_file_num;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    recycleLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_recycleLogFileNum(JNIEnv*, jobject,
+                                                 jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->recycle_log_file_num;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setRecycleLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRecycleLogFileNum(JNIEnv* env, jobject,
+                                                   jlong jhandle,
+                                                   jlong recycle_log_file_num) {
+  auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+      recycle_log_file_num);
+  if (s.ok()) {
+    reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+        ->recycle_log_file_num = recycle_log_file_num;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    maxManifestFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxManifestFileSize(JNIEnv*, jobject,
+                                                   jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->max_manifest_file_size;
+}
+
+/*
+ * Method:    memTableFactoryName
+ * Signature: (J)Ljava/lang/String
+ */
+jstring Java_org_rocksdb_Options_memTableFactoryName(JNIEnv* env, jobject,
+                                                     jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  ROCKSDB_NAMESPACE::MemTableRepFactory* tf = opt->memtable_factory.get();
+
+  // Should never be nullptr.
+  // Default memtable factory is SkipListFactory
+  assert(tf);
+
+  // temporary fix for the historical typo
+  if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) {
+    return env->NewStringUTF("HashLinkedListRepFactory");
+  }
+
+  return env->NewStringUTF(tf->Name());
+}
+
+static std::vector<ROCKSDB_NAMESPACE::DbPath>
+rocksdb_convert_cf_paths_from_java_helper(JNIEnv* env, jobjectArray path_array,
+                                          jlongArray size_array,
+                                          jboolean* has_exception) {
+  jboolean copy_str_has_exception;
+  std::vector<std::string> paths = ROCKSDB_NAMESPACE::JniUtil::copyStrings(
+      env, path_array, &copy_str_has_exception);
+  if (JNI_TRUE == copy_str_has_exception) {
+    // Exception thrown
+    *has_exception = JNI_TRUE;
+    return {};
+  }
+
+  if (static_cast<size_t>(env->GetArrayLength(size_array)) != paths.size()) {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(
+        env,
+        ROCKSDB_NAMESPACE::Status::InvalidArgument(
+            ROCKSDB_NAMESPACE::Slice("There should be a corresponding target "
+                                     "size for every path and vice versa.")));
+    *has_exception = JNI_TRUE;
+    return {};
+  }
+
+  jlong* size_array_ptr = env->GetLongArrayElements(size_array, nullptr);
+  if (nullptr == size_array_ptr) {
+    // exception thrown: OutOfMemoryError
+    *has_exception = JNI_TRUE;
+    return {};
+  }
+  std::vector<ROCKSDB_NAMESPACE::DbPath> cf_paths;
+  for (size_t i = 0; i < paths.size(); ++i) {
+    jlong target_size = size_array_ptr[i];
+    if (target_size < 0) {
+      ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(
+          env,
+          ROCKSDB_NAMESPACE::Status::InvalidArgument(ROCKSDB_NAMESPACE::Slice(
+              "Path target size has to be positive.")));
+      *has_exception = JNI_TRUE;
+      env->ReleaseLongArrayElements(size_array, size_array_ptr, JNI_ABORT);
+      return {};
+    }
+    cf_paths.push_back(ROCKSDB_NAMESPACE::DbPath(
+        paths[i], static_cast<uint64_t>(target_size)));
+  }
+
+  env->ReleaseLongArrayElements(size_array, size_array_ptr, JNI_ABORT);
+
+  return cf_paths;
+}
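+
+// Illustrative only: the path/target-size pairs validated by the helper above
+// originate from Java-side DbPath objects, roughly as follows. This is a
+// hedged, unverified sketch of RocksJava usage, not code from this file:
+//
+//   options.setCfPaths(Arrays.asList(
+//       new DbPath(Paths.get("/fast_ssd"), 10L * 1024 * 1024 * 1024),
+//       new DbPath(Paths.get("/bulk_hdd"), 100L * 1024 * 1024 * 1024)));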
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setCfPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_Options_setCfPaths(JNIEnv* env, jclass, jlong jhandle,
+                                         jobjectArray path_array,
+                                         jlongArray size_array) {
+  auto* options = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  jboolean has_exception = JNI_FALSE;
+  std::vector<ROCKSDB_NAMESPACE::DbPath> cf_paths =
+      rocksdb_convert_cf_paths_from_java_helper(env, path_array, size_array,
+                                                &has_exception);
+  if (JNI_FALSE == has_exception) {
+    options->cf_paths = std::move(cf_paths);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    cfPathsLen
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_cfPathsLen(JNIEnv*, jclass, jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jlong>(opt->cf_paths.size());
+}
+
+template <typename T>
+static void rocksdb_convert_cf_paths_to_java_helper(JNIEnv* env, jlong jhandle,
+                                                    jobjectArray jpaths,
+                                                    jlongArray jtarget_sizes) {
+  jboolean is_copy;
+  jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, &is_copy);
+  if (ptr_jtarget_size == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  auto* opt = reinterpret_cast<T*>(jhandle);
+  const jsize len = env->GetArrayLength(jpaths);
+  for (jsize i = 0; i < len; i++) {
+    ROCKSDB_NAMESPACE::DbPath cf_path = opt->cf_paths[i];
+
+    jstring jpath = env->NewStringUTF(cf_path.path.c_str());
+    if (jpath == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+    env->SetObjectArrayElement(jpaths, i, jpath);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->DeleteLocalRef(jpath);
+      env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+
+    ptr_jtarget_size[i] = static_cast<jlong>(cf_path.target_size);
+
+    env->DeleteLocalRef(jpath);
+  }
+
+  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size,
+                                is_copy ? 0 : JNI_ABORT);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    cfPaths
+ * Signature: (J[Ljava/lang/String;[J)V
+ */
+void Java_org_rocksdb_Options_cfPaths(JNIEnv* env, jclass, jlong jhandle,
+                                      jobjectArray jpaths,
+                                      jlongArray jtarget_sizes) {
+  rocksdb_convert_cf_paths_to_java_helper<ROCKSDB_NAMESPACE::Options>(
+      env, jhandle, jpaths, jtarget_sizes);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMaxManifestFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxManifestFileSize(
+    JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->max_manifest_file_size = static_cast<int64_t>(max_manifest_file_size);
+}
+
+/*
+ * Method:    setMemTableFactory
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMemTableFactory(JNIEnv*, jobject,
+                                                 jlong jhandle,
+                                                 jlong jfactory_handle) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->memtable_factory.reset(
+          reinterpret_cast<ROCKSDB_NAMESPACE::MemTableRepFactory*>(
+              jfactory_handle));
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRateLimiter(JNIEnv*, jobject, jlong jhandle,
+                                             jlong jrate_limiter_handle) {
+  std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>* pRateLimiter =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(
+          jrate_limiter_handle);
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->rate_limiter =
+      *pRateLimiter;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setSstFileManager
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setSstFileManager(
+    JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) {
+  auto* sptr_sst_file_manager =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::SstFileManager>*>(
+          jsst_file_manager_handle);
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->sst_file_manager =
+      *sptr_sst_file_manager;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setLogger
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setLogger(JNIEnv*, jobject, jlong jhandle,
+                                        jlong jlogger_handle) {
+  std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>* pLogger =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*>(
+          jlogger_handle);
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->info_log = *pLogger;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setInfoLogLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setInfoLogLevel(JNIEnv*, jobject, jlong jhandle,
+                                              jbyte jlog_level) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->info_log_level =
+      static_cast<ROCKSDB_NAMESPACE::InfoLogLevel>(jlog_level);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    infoLogLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_infoLogLevel(JNIEnv*, jobject, jlong jhandle) {
+  return static_cast<jbyte>(
+      reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->info_log_level);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    tableCacheNumshardbits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_tableCacheNumshardbits(JNIEnv*, jobject,
+                                                     jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->table_cache_numshardbits;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setTableCacheNumshardbits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setTableCacheNumshardbits(
+    JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->table_cache_numshardbits = static_cast<int>(table_cache_numshardbits);
+}
+
+/*
+ * Method:    useFixedLengthPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
+    JNIEnv*, jobject, jlong jhandle, jint jprefix_length) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewFixedPrefixTransform(
+          static_cast<int>(jprefix_length)));
+}
+
+/*
+ * Method:    useCappedPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_useCappedPrefixExtractor(JNIEnv*, jobject,
+                                                       jlong jhandle,
+                                                       jint jprefix_length) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewCappedPrefixTransform(
+          static_cast<int>(jprefix_length)));
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    walTtlSeconds
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_walTtlSeconds(JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->WAL_ttl_seconds;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setWalTtlSeconds
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalTtlSeconds(JNIEnv*, jobject, jlong jhandle,
+                                               jlong WAL_ttl_seconds) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->WAL_ttl_seconds =
+      static_cast<uint64_t>(WAL_ttl_seconds);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    walSizeLimitMB
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_walSizeLimitMB(JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->WAL_size_limit_MB;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setWalSizeLimitMB
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setWalSizeLimitMB(JNIEnv*, jobject, jlong jhandle,
+                                                jlong WAL_size_limit_MB) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)->WAL_size_limit_MB =
+      static_cast<uint64_t>(WAL_size_limit_MB);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMaxWriteBatchGroupSizeBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxWriteBatchGroupSizeBytes(
+    JNIEnv*, jclass, jlong jhandle, jlong jmax_write_batch_group_size_bytes) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opt->max_write_batch_group_size_bytes =
+      static_cast<uint64_t>(jmax_write_batch_group_size_bytes);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    maxWriteBatchGroupSizeBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxWriteBatchGroupSizeBytes(JNIEnv*, jclass,
+                                                           jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jlong>(opt->max_write_batch_group_size_bytes);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ *
Method: manifestPreallocationSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_manifestPreallocationSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->manifest_preallocation_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setManifestPreallocationSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setManifestPreallocationSize( + JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + preallocation_size); + if (s.ok()) { + reinterpret_cast(jhandle) + ->manifest_preallocation_size = preallocation_size; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Method: setTableFactory + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setTableFactory(JNIEnv*, jobject, jlong jhandle, + jlong jtable_factory_handle) { + auto* options = reinterpret_cast(jhandle); + auto* table_factory = + reinterpret_cast(jtable_factory_handle); + options->table_factory.reset(table_factory); +} + +/* + * Method: setSstPartitionerFactory + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setSstPartitionerFactory(JNIEnv*, jobject, + jlong jhandle, + jlong factory_handle) { + auto* options = reinterpret_cast(jhandle); + auto factory = reinterpret_cast< + std::shared_ptr*>( + factory_handle); + options->sst_partitioner_factory = *factory; +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionThreadLimiter + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setCompactionThreadLimiter( + JNIEnv*, jclass, jlong jhandle, jlong jlimiter_handle) { + auto* options = reinterpret_cast(jhandle); + auto* limiter = reinterpret_cast< + std::shared_ptr*>( + jlimiter_handle); + options->compaction_thread_limiter = *limiter; +} + +/* + * Class: org_rocksdb_Options + * Method: allowMmapReads + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowMmapReads(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->allow_mmap_reads; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowMmapReads + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowMmapReads(JNIEnv*, jobject, jlong jhandle, + jboolean allow_mmap_reads) { + reinterpret_cast(jhandle)->allow_mmap_reads = + static_cast(allow_mmap_reads); +} + +/* + * Class: org_rocksdb_Options + * Method: allowMmapWrites + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowMmapWrites(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->allow_mmap_writes; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowMmapWrites + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowMmapWrites(JNIEnv*, jobject, + jlong jhandle, + jboolean allow_mmap_writes) { + reinterpret_cast(jhandle)->allow_mmap_writes = + static_cast(allow_mmap_writes); +} + +/* + * Class: org_rocksdb_Options + * Method: useDirectReads + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_useDirectReads(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->use_direct_reads; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseDirectReads + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseDirectReads(JNIEnv*, jobject, jlong jhandle, + jboolean use_direct_reads) { + reinterpret_cast(jhandle)->use_direct_reads = + static_cast(use_direct_reads); +} + +/* + * Class: org_rocksdb_Options + * Method: useDirectIoForFlushAndCompaction + * Signature: (J)Z + */ +jboolean 
Java_org_rocksdb_Options_useDirectIoForFlushAndCompaction( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->use_direct_io_for_flush_and_compaction; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseDirectIoForFlushAndCompaction + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseDirectIoForFlushAndCompaction( + JNIEnv*, jobject, jlong jhandle, + jboolean use_direct_io_for_flush_and_compaction) { + reinterpret_cast(jhandle) + ->use_direct_io_for_flush_and_compaction = + static_cast(use_direct_io_for_flush_and_compaction); +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowFAllocate + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowFAllocate(JNIEnv*, jobject, jlong jhandle, + jboolean jallow_fallocate) { + reinterpret_cast(jhandle)->allow_fallocate = + static_cast(jallow_fallocate); +} + +/* + * Class: org_rocksdb_Options + * Method: allowFAllocate + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowFAllocate(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_fallocate); +} + +/* + * Class: org_rocksdb_Options + * Method: isFdCloseOnExec + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_isFdCloseOnExec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->is_fd_close_on_exec; +} + +/* + * Class: org_rocksdb_Options + * Method: setIsFdCloseOnExec + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setIsFdCloseOnExec(JNIEnv*, jobject, + jlong jhandle, + jboolean is_fd_close_on_exec) { + reinterpret_cast(jhandle)->is_fd_close_on_exec = + static_cast(is_fd_close_on_exec); +} + +/* + * Class: org_rocksdb_Options + * Method: statsDumpPeriodSec + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_statsDumpPeriodSec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->stats_dump_period_sec; +} + +/* + * Class: org_rocksdb_Options + * Method: setStatsDumpPeriodSec + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setStatsDumpPeriodSec( + JNIEnv*, jobject, jlong jhandle, jint jstats_dump_period_sec) { + reinterpret_cast(jhandle) + ->stats_dump_period_sec = + static_cast(jstats_dump_period_sec); +} + +/* + * Class: org_rocksdb_Options + * Method: statsPersistPeriodSec + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_statsPersistPeriodSec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->stats_persist_period_sec; +} + +/* + * Class: org_rocksdb_Options + * Method: setStatsPersistPeriodSec + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setStatsPersistPeriodSec( + JNIEnv*, jobject, jlong jhandle, jint jstats_persist_period_sec) { + reinterpret_cast(jhandle) + ->stats_persist_period_sec = + static_cast(jstats_persist_period_sec); +} + +/* + * Class: org_rocksdb_Options + * Method: statsHistoryBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_statsHistoryBufferSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->stats_history_buffer_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setStatsHistoryBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setStatsHistoryBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jstats_history_buffer_size) { + reinterpret_cast(jhandle) + ->stats_history_buffer_size = + static_cast(jstats_history_buffer_size); +} + +/* + * Class: org_rocksdb_Options + * Method: adviseRandomOnOpen + * Signature: (J)Z + */ +jboolean 
Java_org_rocksdb_Options_adviseRandomOnOpen(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->advise_random_on_open; +} + +/* + * Class: org_rocksdb_Options + * Method: setAdviseRandomOnOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAdviseRandomOnOpen( + JNIEnv*, jobject, jlong jhandle, jboolean advise_random_on_open) { + reinterpret_cast(jhandle) + ->advise_random_on_open = static_cast(advise_random_on_open); +} + +/* + * Class: org_rocksdb_Options + * Method: setDbWriteBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setDbWriteBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jdb_write_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->db_write_buffer_size = static_cast(jdb_write_buffer_size); +} + +/* + * Class: org_rocksdb_Options + * Method: dbWriteBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_dbWriteBufferSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->db_write_buffer_size); +} + +/* + * Class: org_rocksdb_Options + * Method: setAccessHintOnCompactionStart + * Signature: (JB)V + */ +void Java_org_rocksdb_Options_setAccessHintOnCompactionStart( + JNIEnv*, jobject, jlong jhandle, jbyte jaccess_hint_value) { + auto* opt = reinterpret_cast(jhandle); + opt->access_hint_on_compaction_start = + ROCKSDB_NAMESPACE::AccessHintJni::toCppAccessHint(jaccess_hint_value); +} + +/* + * Class: org_rocksdb_Options + * Method: accessHintOnCompactionStart + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::AccessHintJni::toJavaAccessHint( + opt->access_hint_on_compaction_start); +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setCompactionReadaheadSize( + JNIEnv*, jobject, jlong jhandle, jlong jcompaction_readahead_size) { + auto* opt = reinterpret_cast(jhandle); + opt->compaction_readahead_size = + static_cast(jcompaction_readahead_size); +} + +/* + * Class: org_rocksdb_Options + * Method: compactionReadaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_compactionReadaheadSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->compaction_readahead_size); +} + +/* + * Class: org_rocksdb_Options + * Method: setRandomAccessMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setRandomAccessMaxBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->random_access_max_buffer_size = + static_cast(jrandom_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_Options + * Method: randomAccessMaxBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_randomAccessMaxBufferSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->random_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_Options + * Method: setWritableFileMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWritableFileMaxBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jwritable_file_max_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->writable_file_max_buffer_size = + static_cast(jwritable_file_max_buffer_size); +} + +/* + * Class: org_rocksdb_Options + * Method: writableFileMaxBufferSize + * 
Signature: (J)J + */ +jlong Java_org_rocksdb_Options_writableFileMaxBufferSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->writable_file_max_buffer_size); +} + +/* + * Class: org_rocksdb_Options + * Method: useAdaptiveMutex + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_useAdaptiveMutex(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->use_adaptive_mutex; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseAdaptiveMutex + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseAdaptiveMutex(JNIEnv*, jobject, + jlong jhandle, + jboolean use_adaptive_mutex) { + reinterpret_cast(jhandle)->use_adaptive_mutex = + static_cast(use_adaptive_mutex); +} + +/* + * Class: org_rocksdb_Options + * Method: bytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_bytesPerSync(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->bytes_per_sync; +} + +/* + * Class: org_rocksdb_Options + * Method: setBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setBytesPerSync(JNIEnv*, jobject, jlong jhandle, + jlong bytes_per_sync) { + reinterpret_cast(jhandle)->bytes_per_sync = + static_cast(bytes_per_sync); +} + +/* + * Class: org_rocksdb_Options + * Method: setWalBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWalBytesPerSync(JNIEnv*, jobject, + jlong jhandle, + jlong jwal_bytes_per_sync) { + reinterpret_cast(jhandle)->wal_bytes_per_sync = + static_cast(jwal_bytes_per_sync); +} + +/* + * Class: org_rocksdb_Options + * Method: walBytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_walBytesPerSync(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->wal_bytes_per_sync); +} + +/* + * Class: org_rocksdb_Options + * Method: setStrictBytesPerSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setStrictBytesPerSync( + JNIEnv*, jobject, jlong jhandle, jboolean jstrict_bytes_per_sync) { + reinterpret_cast(jhandle) + ->strict_bytes_per_sync = jstrict_bytes_per_sync == JNI_TRUE; +} + +/* + * Class: org_rocksdb_Options + * Method: strictBytesPerSync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_strictBytesPerSync(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->strict_bytes_per_sync); +} + +// Note: the RocksJava API currently only supports EventListeners implemented in +// Java. It could be extended in future to also support adding/removing +// EventListeners implemented in C++. 
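+//
+// Illustrative only: a Java-implemented listener is registered roughly as
+// follows. This is a hedged, unverified sketch of RocksJava usage, not code
+// from this file:
+//
+//   final AbstractEventListener listener = new AbstractEventListener() {
+//     @Override
+//     public void onFlushCompleted(final RocksDB db,
+//                                  final FlushJobInfo flushJobInfo) {
+//       // react to the completed flush
+//     }
+//   };
+//   options.setListeners(Collections.singletonList(listener));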
+static void rocksdb_set_event_listeners_helper( + JNIEnv* env, jlongArray jlistener_array, + std::vector>& + listener_sptr_vec) { + jlong* ptr_jlistener_array = + env->GetLongArrayElements(jlistener_array, nullptr); + if (ptr_jlistener_array == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + const jsize array_size = env->GetArrayLength(jlistener_array); + listener_sptr_vec.clear(); + for (jsize i = 0; i < array_size; ++i) { + const auto& listener_sptr = + *reinterpret_cast*>( + ptr_jlistener_array[i]); + listener_sptr_vec.push_back(listener_sptr); + } +} + +/* + * Class: org_rocksdb_Options + * Method: setEventListeners + * Signature: (J[J)V + */ +void Java_org_rocksdb_Options_setEventListeners(JNIEnv* env, jclass, + jlong jhandle, + jlongArray jlistener_array) { + auto* opt = reinterpret_cast(jhandle); + rocksdb_set_event_listeners_helper(env, jlistener_array, opt->listeners); +} + +// Note: the RocksJava API currently only supports EventListeners implemented in +// Java. It could be extended in future to also support adding/removing +// EventListeners implemented in C++. +static jobjectArray rocksdb_get_event_listeners_helper( + JNIEnv* env, + const std::vector>& + listener_sptr_vec) { + jsize sz = static_cast(listener_sptr_vec.size()); + jclass jlistener_clazz = + ROCKSDB_NAMESPACE::AbstractEventListenerJni::getJClass(env); + jobjectArray jlisteners = env->NewObjectArray(sz, jlistener_clazz, nullptr); + if (jlisteners == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + for (jsize i = 0; i < sz; ++i) { + const auto* jni_cb = + static_cast( + listener_sptr_vec[i].get()); + env->SetObjectArrayElement(jlisteners, i, jni_cb->GetJavaObject()); + } + return jlisteners; +} + +/* + * Class: org_rocksdb_Options + * Method: eventListeners + * Signature: (J)[Lorg/rocksdb/AbstractEventListener; + */ +jobjectArray Java_org_rocksdb_Options_eventListeners(JNIEnv* env, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return rocksdb_get_event_listeners_helper(env, opt->listeners); +} + +/* + * Class: org_rocksdb_Options + * Method: setEnableThreadTracking + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setEnableThreadTracking( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) { + auto* opt = reinterpret_cast(jhandle); + opt->enable_thread_tracking = static_cast(jenable_thread_tracking); +} + +/* + * Class: org_rocksdb_Options + * Method: enableThreadTracking + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_enableThreadTracking(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->enable_thread_tracking); +} + +/* + * Class: org_rocksdb_Options + * Method: setDelayedWriteRate + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setDelayedWriteRate(JNIEnv*, jobject, + jlong jhandle, + jlong jdelayed_write_rate) { + auto* opt = reinterpret_cast(jhandle); + opt->delayed_write_rate = static_cast(jdelayed_write_rate); +} + +/* + * Class: org_rocksdb_Options + * Method: delayedWriteRate + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_delayedWriteRate(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->delayed_write_rate); +} + +/* + * Class: org_rocksdb_Options + * Method: setEnablePipelinedWrite + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setEnablePipelinedWrite( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) { + auto* opt = reinterpret_cast(jhandle); + 
opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE; +} + +/* + * Class: org_rocksdb_Options + * Method: enablePipelinedWrite + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_enablePipelinedWrite(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->enable_pipelined_write); +} + +/* + * Class: org_rocksdb_Options + * Method: setUnorderedWrite + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUnorderedWrite(JNIEnv*, jobject, jlong jhandle, + jboolean unordered_write) { + reinterpret_cast(jhandle)->unordered_write = + static_cast(unordered_write); +} + +/* + * Class: org_rocksdb_Options + * Method: unorderedWrite + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_unorderedWrite(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->unordered_write; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowConcurrentMemtableWrite + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowConcurrentMemtableWrite(JNIEnv*, jobject, + jlong jhandle, + jboolean allow) { + reinterpret_cast(jhandle) + ->allow_concurrent_memtable_write = static_cast(allow); +} + +/* + * Class: org_rocksdb_Options + * Method: allowConcurrentMemtableWrite + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowConcurrentMemtableWrite(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->allow_concurrent_memtable_write; +} + +/* + * Class: org_rocksdb_Options + * Method: setEnableWriteThreadAdaptiveYield + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setEnableWriteThreadAdaptiveYield( + JNIEnv*, jobject, jlong jhandle, jboolean yield) { + reinterpret_cast(jhandle) + ->enable_write_thread_adaptive_yield = static_cast(yield); +} + +/* + * Class: org_rocksdb_Options + * Method: enableWriteThreadAdaptiveYield + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_enableWriteThreadAdaptiveYield( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->enable_write_thread_adaptive_yield; +} + +/* + * Class: org_rocksdb_Options + * Method: setWriteThreadMaxYieldUsec + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWriteThreadMaxYieldUsec(JNIEnv*, jobject, + jlong jhandle, + jlong max) { + reinterpret_cast(jhandle) + ->write_thread_max_yield_usec = static_cast(max); +} + +/* + * Class: org_rocksdb_Options + * Method: writeThreadMaxYieldUsec + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_writeThreadMaxYieldUsec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->write_thread_max_yield_usec; +} + +/* + * Class: org_rocksdb_Options + * Method: setWriteThreadSlowYieldUsec + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWriteThreadSlowYieldUsec(JNIEnv*, jobject, + jlong jhandle, + jlong slow) { + reinterpret_cast(jhandle) + ->write_thread_slow_yield_usec = static_cast(slow); +} + +/* + * Class: org_rocksdb_Options + * Method: writeThreadSlowYieldUsec + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_writeThreadSlowYieldUsec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->write_thread_slow_yield_usec; +} + +/* + * Class: org_rocksdb_Options + * Method: setSkipStatsUpdateOnDbOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setSkipStatsUpdateOnDbOpen( + JNIEnv*, jobject, jlong jhandle, jboolean jskip_stats_update_on_db_open) { + auto* opt = reinterpret_cast(jhandle); + opt->skip_stats_update_on_db_open = + 
static_cast(jskip_stats_update_on_db_open); +} + +/* + * Class: org_rocksdb_Options + * Method: skipStatsUpdateOnDbOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_skipStatsUpdateOnDbOpen(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->skip_stats_update_on_db_open); +} + +/* + * Class: org_rocksdb_Options + * Method: setSkipCheckingSstFileSizesOnDbOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setSkipCheckingSstFileSizesOnDbOpen( + JNIEnv*, jclass, jlong jhandle, + jboolean jskip_checking_sst_file_sizes_on_db_open) { + auto* opt = reinterpret_cast(jhandle); + opt->skip_checking_sst_file_sizes_on_db_open = + static_cast(jskip_checking_sst_file_sizes_on_db_open); +} + +/* + * Class: org_rocksdb_Options + * Method: skipCheckingSstFileSizesOnDbOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_skipCheckingSstFileSizesOnDbOpen( + JNIEnv*, jclass, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->skip_checking_sst_file_sizes_on_db_open); +} + +/* + * Class: org_rocksdb_Options + * Method: setWalRecoveryMode + * Signature: (JB)V + */ +void Java_org_rocksdb_Options_setWalRecoveryMode( + JNIEnv*, jobject, jlong jhandle, jbyte jwal_recovery_mode_value) { + auto* opt = reinterpret_cast(jhandle); + opt->wal_recovery_mode = + ROCKSDB_NAMESPACE::WALRecoveryModeJni::toCppWALRecoveryMode( + jwal_recovery_mode_value); +} + +/* + * Class: org_rocksdb_Options + * Method: walRecoveryMode + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Options_walRecoveryMode(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::WALRecoveryModeJni::toJavaWALRecoveryMode( + opt->wal_recovery_mode); +} + +/* + * Class: org_rocksdb_Options + * Method: setAllow2pc + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllow2pc(JNIEnv*, jobject, jlong jhandle, + jboolean jallow_2pc) { + auto* opt = reinterpret_cast(jhandle); + opt->allow_2pc = static_cast(jallow_2pc); +} + +/* + * Class: org_rocksdb_Options + * Method: allow2pc + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allow2pc(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_2pc); +} + +/* + * Class: org_rocksdb_Options + * Method: setRowCache + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setRowCache(JNIEnv*, jobject, jlong jhandle, + jlong jrow_cache_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* row_cache = + reinterpret_cast*>( + jrow_cache_handle); + opt->row_cache = *row_cache; +} + +/* + * Class: org_rocksdb_Options + * Method: setWalFilter + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWalFilter(JNIEnv*, jobject, jlong jhandle, + jlong jwal_filter_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* wal_filter = reinterpret_cast( + jwal_filter_handle); + opt->wal_filter = wal_filter; +} + +/* + * Class: org_rocksdb_Options + * Method: setFailIfOptionsFileError + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setFailIfOptionsFileError( + JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) { + auto* opt = reinterpret_cast(jhandle); + opt->fail_if_options_file_error = + static_cast(jfail_if_options_file_error); +} + +/* + * Class: org_rocksdb_Options + * Method: failIfOptionsFileError + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_failIfOptionsFileError(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + 
return static_cast<jboolean>(opt->fail_if_options_file_error);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setDumpMallocStats
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setDumpMallocStats(JNIEnv*, jobject,
+                                                 jlong jhandle,
+                                                 jboolean jdump_malloc_stats) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opt->dump_malloc_stats = static_cast<bool>(jdump_malloc_stats);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    dumpMallocStats
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_dumpMallocStats(JNIEnv*, jobject,
+                                                  jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jboolean>(opt->dump_malloc_stats);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setAvoidFlushDuringRecovery
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAvoidFlushDuringRecovery(
+    JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opt->avoid_flush_during_recovery =
+      static_cast<bool>(javoid_flush_during_recovery);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    avoidFlushDuringRecovery
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_avoidFlushDuringRecovery(JNIEnv*, jobject,
+                                                           jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jboolean>(opt->avoid_flush_during_recovery);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setAvoidUnnecessaryBlockingIO
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setAvoidUnnecessaryBlockingIO(
+    JNIEnv*, jclass, jlong jhandle, jboolean avoid_blocking_io) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opt->avoid_unnecessary_blocking_io = static_cast<bool>(avoid_blocking_io);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    avoidUnnecessaryBlockingIO
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_avoidUnnecessaryBlockingIO(JNIEnv*, jclass,
+                                                             jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jboolean>(opt->avoid_unnecessary_blocking_io);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setPersistStatsToDisk
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setPersistStatsToDisk(
+    JNIEnv*, jclass, jlong jhandle, jboolean persist_stats_to_disk) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opt->persist_stats_to_disk = static_cast<bool>(persist_stats_to_disk);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    persistStatsToDisk
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_persistStatsToDisk(JNIEnv*, jclass,
+                                                     jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jboolean>(opt->persist_stats_to_disk);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setWriteDbidToManifest
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setWriteDbidToManifest(
+    JNIEnv*, jclass, jlong jhandle, jboolean jwrite_dbid_to_manifest) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opt->write_dbid_to_manifest = static_cast<bool>(jwrite_dbid_to_manifest);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    writeDbidToManifest
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_writeDbidToManifest(JNIEnv*, jclass,
+                                                      jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return static_cast<jboolean>(opt->write_dbid_to_manifest);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setLogReadaheadSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setLogReadaheadSize(JNIEnv*, jclass,
+                                                  jlong jhandle,
+                                                  jlong jlog_readahead_size) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opt->log_readahead_size = static_cast<size_t>(jlog_readahead_size);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    logReadaheadSize
+ * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_logReadaheadSize(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->log_readahead_size); +} + +/* + * Class: org_rocksdb_Options + * Method: setBestEffortsRecovery + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setBestEffortsRecovery( + JNIEnv*, jclass, jlong jhandle, jboolean jbest_efforts_recovery) { + auto* opt = reinterpret_cast(jhandle); + opt->best_efforts_recovery = static_cast(jbest_efforts_recovery); +} + +/* + * Class: org_rocksdb_Options + * Method: bestEffortsRecovery + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_bestEffortsRecovery(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->best_efforts_recovery); +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBgErrorResumeCount + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBgErrorResumeCount( + JNIEnv*, jclass, jlong jhandle, jint jmax_bgerror_resume_count) { + auto* opt = reinterpret_cast(jhandle); + opt->max_bgerror_resume_count = static_cast(jmax_bgerror_resume_count); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBgerrorResumeCount + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBgerrorResumeCount(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_bgerror_resume_count); +} + +/* + * Class: org_rocksdb_Options + * Method: setBgerrorResumeRetryInterval + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setBgerrorResumeRetryInterval( + JNIEnv*, jclass, jlong jhandle, jlong jbgerror_resume_retry_interval) { + auto* opt = reinterpret_cast(jhandle); + opt->bgerror_resume_retry_interval = + static_cast(jbgerror_resume_retry_interval); +} + +/* + * Class: org_rocksdb_Options + * Method: bgerrorResumeRetryInterval + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_bgerrorResumeRetryInterval(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->bgerror_resume_retry_interval); +} + +/* + * Class: org_rocksdb_Options + * Method: setAvoidFlushDuringShutdown + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAvoidFlushDuringShutdown( + JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) { + auto* opt = reinterpret_cast(jhandle); + opt->avoid_flush_during_shutdown = + static_cast(javoid_flush_during_shutdown); +} + +/* + * Class: org_rocksdb_Options + * Method: avoidFlushDuringShutdown + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_avoidFlushDuringShutdown(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->avoid_flush_during_shutdown); +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowIngestBehind + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowIngestBehind( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) { + auto* opt = reinterpret_cast(jhandle); + opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE; +} + +/* + * Class: org_rocksdb_Options + * Method: allowIngestBehind + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowIngestBehind(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_ingest_behind); +} + +/* + * Class: org_rocksdb_Options + * Method: setTwoWriteQueues + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setTwoWriteQueues(JNIEnv*, jobject, jlong jhandle, + jboolean 
jtwo_write_queues) { + auto* opt = reinterpret_cast(jhandle); + opt->two_write_queues = jtwo_write_queues == JNI_TRUE; +} + +/* + * Class: org_rocksdb_Options + * Method: twoWriteQueues + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_twoWriteQueues(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->two_write_queues); +} + +/* + * Class: org_rocksdb_Options + * Method: setManualWalFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setManualWalFlush(JNIEnv*, jobject, jlong jhandle, + jboolean jmanual_wal_flush) { + auto* opt = reinterpret_cast(jhandle); + opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE; +} + +/* + * Class: org_rocksdb_Options + * Method: manualWalFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_manualWalFlush(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->manual_wal_flush); +} + +/* + * Class: org_rocksdb_Options + * Method: setAtomicFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAtomicFlush(JNIEnv*, jobject, jlong jhandle, + jboolean jatomic_flush) { + auto* opt = reinterpret_cast(jhandle); + opt->atomic_flush = jatomic_flush == JNI_TRUE; +} + +/* + * Class: org_rocksdb_Options + * Method: atomicFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_atomicFlush(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->atomic_flush); +} + +/* + * Method: tableFactoryName + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_tableFactoryName(JNIEnv* env, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + ROCKSDB_NAMESPACE::TableFactory* tf = opt->table_factory.get(); + + // Should never be nullptr. 
+  // Default table factory is BlockBasedTableFactory
+  assert(tf);
+
+  return env->NewStringUTF(tf->Name());
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    minWriteBufferNumberToMerge
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(JNIEnv*, jobject,
+                                                          jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->min_write_buffer_number_to_merge;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMinWriteBufferNumberToMerge
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
+    JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->min_write_buffer_number_to_merge =
+      static_cast<int>(jmin_write_buffer_number_to_merge);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    maxWriteBufferNumberToMaintain
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv*, jobject,
+                                                             jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->max_write_buffer_number_to_maintain;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMaxWriteBufferNumberToMaintain
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
+    JNIEnv*, jobject, jlong jhandle,
+    jint jmax_write_buffer_number_to_maintain) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle)
+      ->max_write_buffer_number_to_maintain =
+      static_cast<int>(jmax_write_buffer_number_to_maintain);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setCompressionType(
+    JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) {
+  auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  opts->compression =
+      ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType(
+          jcompression_type_value);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    compressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_compressionType(JNIEnv*, jobject,
+                                               jlong jhandle) {
+  auto* opts = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
+  return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType(
+      opts->compression);
+}
+
+/**
+ * Helper method to convert a Java byte array of compression levels
+ * to a C++ vector of ROCKSDB_NAMESPACE::CompressionType
+ *
+ * @param env A pointer to the Java environment
+ * @param jcompression_levels A reference to a java byte array
+ *     where each byte indicates a compression level
+ *
+ * @return A std::unique_ptr to the vector, or std::unique_ptr(nullptr) if a
+ *     JNI exception occurs
+ */
+std::unique_ptr<std::vector<ROCKSDB_NAMESPACE::CompressionType>>
+rocksdb_compression_vector_helper(JNIEnv* env, jbyteArray jcompression_levels) {
+  jsize len = env->GetArrayLength(jcompression_levels);
+  jbyte* jcompression_level =
+      env->GetByteArrayElements(jcompression_levels, nullptr);
+  if (jcompression_level == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return std::unique_ptr<std::vector<ROCKSDB_NAMESPACE::CompressionType>>();
+  }
+
+  auto* compression_levels =
+      new std::vector<ROCKSDB_NAMESPACE::CompressionType>();
+  std::unique_ptr<std::vector<ROCKSDB_NAMESPACE::CompressionType>>
+      uptr_compression_levels(compression_levels);
+
+  for (jsize i = 0; i < len; i++) {
+    jbyte jcl = jcompression_level[i];
+    compression_levels->push_back(
+        static_cast<ROCKSDB_NAMESPACE::CompressionType>(jcl));
+  }
+
+  env->ReleaseByteArrayElements(jcompression_levels, jcompression_level,
+                                JNI_ABORT);
+
+  return uptr_compression_levels;
+}
+
+/**
+ * Helper method to convert a C++ vector of ROCKSDB_NAMESPACE::CompressionType
+ * to a Java byte array of compression levels
+ *
+ * @param env A pointer to the Java environment
+ * @param compression_levels A vector of CompressionType, where each entry
+ *     indicates a compression level
+ *
+ *
@return A jbytearray or nullptr if an exception occurs + */ +jbyteArray rocksdb_compression_list_helper( + JNIEnv* env, + std::vector compression_levels) { + const size_t len = compression_levels.size(); + jbyte* jbuf = new jbyte[len]; + + for (size_t i = 0; i < len; i++) { + jbuf[i] = compression_levels[i]; + } + + // insert in java array + jbyteArray jcompression_levels = env->NewByteArray(static_cast(len)); + if (jcompression_levels == nullptr) { + // exception thrown: OutOfMemoryError + delete[] jbuf; + return nullptr; + } + env->SetByteArrayRegion(jcompression_levels, 0, static_cast(len), + jbuf); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jcompression_levels); + delete[] jbuf; + return nullptr; + } + + delete[] jbuf; + + return jcompression_levels; +} + +/* + * Class: org_rocksdb_Options + * Method: setCompressionPerLevel + * Signature: (J[B)V + */ +void Java_org_rocksdb_Options_setCompressionPerLevel( + JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) { + auto uptr_compression_levels = + rocksdb_compression_vector_helper(env, jcompressionLevels); + if (!uptr_compression_levels) { + // exception occurred + return; + } + auto* options = reinterpret_cast(jhandle); + options->compression_per_level = *(uptr_compression_levels.get()); +} + +/* + * Class: org_rocksdb_Options + * Method: compressionPerLevel + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_Options_compressionPerLevel(JNIEnv* env, jobject, + jlong jhandle) { + auto* options = reinterpret_cast(jhandle); + return rocksdb_compression_list_helper(env, options->compression_per_level); +} + +/* + * Class: org_rocksdb_Options + * Method: setBottommostCompressionType + * Signature: (JB)V + */ +void Java_org_rocksdb_Options_setBottommostCompressionType( + JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) { + auto* options = reinterpret_cast(jhandle); + options->bottommost_compression = + ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType( + jcompression_type_value); +} + +/* + * Class: org_rocksdb_Options + * Method: bottommostCompressionType + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Options_bottommostCompressionType(JNIEnv*, jobject, + jlong jhandle) { + auto* options = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType( + options->bottommost_compression); +} + +/* + * Class: org_rocksdb_Options + * Method: setBottommostCompressionOptions + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setBottommostCompressionOptions( + JNIEnv*, jobject, jlong jhandle, + jlong jbottommost_compression_options_handle) { + auto* options = reinterpret_cast(jhandle); + auto* bottommost_compression_options = + reinterpret_cast( + jbottommost_compression_options_handle); + options->bottommost_compression_opts = *bottommost_compression_options; +} + +/* + * Class: org_rocksdb_Options + * Method: setCompressionOptions + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setCompressionOptions( + JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) { + auto* options = reinterpret_cast(jhandle); + auto* compression_options = + reinterpret_cast( + jcompression_options_handle); + options->compression_opts = *compression_options; +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionStyle + * Signature: (JB)V + */ +void Java_org_rocksdb_Options_setCompactionStyle(JNIEnv*, jobject, + jlong jhandle, + jbyte jcompaction_style) { + auto* options = 
reinterpret_cast(jhandle); + options->compaction_style = + ROCKSDB_NAMESPACE::CompactionStyleJni::toCppCompactionStyle( + jcompaction_style); +} + +/* + * Class: org_rocksdb_Options + * Method: compactionStyle + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Options_compactionStyle(JNIEnv*, jobject, + jlong jhandle) { + auto* options = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompactionStyleJni::toJavaCompactionStyle( + options->compaction_style); +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxTableFilesSizeFIFO + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxTableFilesSizeFIFO( + JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) { + reinterpret_cast(jhandle) + ->compaction_options_fifo.max_table_files_size = + static_cast(jmax_table_files_size); +} + +/* + * Class: org_rocksdb_Options + * Method: maxTableFilesSizeFIFO + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->compaction_options_fifo.max_table_files_size; +} + +/* + * Class: org_rocksdb_Options + * Method: numLevels + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_numLevels(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->num_levels; +} + +/* + * Class: org_rocksdb_Options + * Method: setNumLevels + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setNumLevels(JNIEnv*, jobject, jlong jhandle, + jint jnum_levels) { + reinterpret_cast(jhandle)->num_levels = + static_cast(jnum_levels); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroFileNumCompactionTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger(JNIEnv*, + jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->level0_file_num_compaction_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelZeroFileNumCompactionTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger( + JNIEnv*, jobject, jlong jhandle, jint jlevel0_file_num_compaction_trigger) { + reinterpret_cast(jhandle) + ->level0_file_num_compaction_trigger = + static_cast(jlevel0_file_num_compaction_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroSlowdownWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->level0_slowdown_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelSlowdownWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger( + JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) { + reinterpret_cast(jhandle) + ->level0_slowdown_writes_trigger = + static_cast(jlevel0_slowdown_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroStopWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->level0_stop_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelStopWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger( + JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) { + reinterpret_cast(jhandle) + ->level0_stop_writes_trigger = + static_cast(jlevel0_stop_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: targetFileSizeBase + * 
Signature: (J)J + */ +jlong Java_org_rocksdb_Options_targetFileSizeBase(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->target_file_size_base; +} + +/* + * Class: org_rocksdb_Options + * Method: setTargetFileSizeBase + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setTargetFileSizeBase( + JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) { + reinterpret_cast(jhandle) + ->target_file_size_base = static_cast(jtarget_file_size_base); +} + +/* + * Class: org_rocksdb_Options + * Method: targetFileSizeMultiplier + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_targetFileSizeMultiplier(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->target_file_size_multiplier; +} + +/* + * Class: org_rocksdb_Options + * Method: setTargetFileSizeMultiplier + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setTargetFileSizeMultiplier( + JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) { + reinterpret_cast(jhandle) + ->target_file_size_multiplier = + static_cast(jtarget_file_size_multiplier); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBytesForLevelBase + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxBytesForLevelBase(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_bytes_for_level_base; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBytesForLevelBase + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxBytesForLevelBase( + JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) { + reinterpret_cast(jhandle) + ->max_bytes_for_level_base = + static_cast(jmax_bytes_for_level_base); +} + +/* + * Class: org_rocksdb_Options + * Method: levelCompactionDynamicLevelBytes + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_levelCompactionDynamicLevelBytes( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->level_compaction_dynamic_level_bytes; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelCompactionDynamicLevelBytes + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setLevelCompactionDynamicLevelBytes( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) { + reinterpret_cast(jhandle) + ->level_compaction_dynamic_level_bytes = (jenable_dynamic_level_bytes); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBytesForLevelMultiplier + * Signature: (J)D + */ +jdouble Java_org_rocksdb_Options_maxBytesForLevelMultiplier(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_bytes_for_level_multiplier; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBytesForLevelMultiplier + * Signature: (JD)V + */ +void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier( + JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) { + reinterpret_cast(jhandle) + ->max_bytes_for_level_multiplier = + static_cast(jmax_bytes_for_level_multiplier); +} + +/* + * Class: org_rocksdb_Options + * Method: maxCompactionBytes + * Signature: (J)I + */ +jlong Java_org_rocksdb_Options_maxCompactionBytes(JNIEnv*, jobject, + jlong jhandle) { + return static_cast( + reinterpret_cast(jhandle) + ->max_compaction_bytes); +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxCompactionBytes + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxCompactionBytes( + JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) { + reinterpret_cast(jhandle)->max_compaction_bytes = + static_cast(jmax_compaction_bytes); +} + +/* + * Class: 
org_rocksdb_Options + * Method: arenaBlockSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_arenaBlockSize(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->arena_block_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setArenaBlockSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setArenaBlockSize(JNIEnv* env, jobject, + jlong jhandle, + jlong jarena_block_size) { + auto s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jarena_block_size); + if (s.ok()) { + reinterpret_cast(jhandle)->arena_block_size = + jarena_block_size; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: disableAutoCompactions + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_disableAutoCompactions(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->disable_auto_compactions; +} + +/* + * Class: org_rocksdb_Options + * Method: setDisableAutoCompactions + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setDisableAutoCompactions( + JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) { + reinterpret_cast(jhandle) + ->disable_auto_compactions = static_cast(jdisable_auto_compactions); +} + +/* + * Class: org_rocksdb_Options + * Method: maxSequentialSkipInIterations + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_sequential_skip_in_iterations; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxSequentialSkipInIterations + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations( + JNIEnv*, jobject, jlong jhandle, jlong jmax_sequential_skip_in_iterations) { + reinterpret_cast(jhandle) + ->max_sequential_skip_in_iterations = + static_cast(jmax_sequential_skip_in_iterations); +} + +/* + * Class: org_rocksdb_Options + * Method: inplaceUpdateSupport + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->inplace_update_support; +} + +/* + * Class: org_rocksdb_Options + * Method: setInplaceUpdateSupport + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setInplaceUpdateSupport( + JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) { + reinterpret_cast(jhandle) + ->inplace_update_support = static_cast(jinplace_update_support); +} + +/* + * Class: org_rocksdb_Options + * Method: inplaceUpdateNumLocks + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->inplace_update_num_locks; +} + +/* + * Class: org_rocksdb_Options + * Method: setInplaceUpdateNumLocks + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setInplaceUpdateNumLocks( + JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + jinplace_update_num_locks); + if (s.ok()) { + reinterpret_cast(jhandle) + ->inplace_update_num_locks = jinplace_update_num_locks; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: memtablePrefixBloomSizeRatio + * Signature: (J)I + */ +jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + 
->memtable_prefix_bloom_size_ratio; +} + +/* + * Class: org_rocksdb_Options + * Method: setMemtablePrefixBloomSizeRatio + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio( + JNIEnv*, jobject, jlong jhandle, + jdouble jmemtable_prefix_bloom_size_ratio) { + reinterpret_cast(jhandle) + ->memtable_prefix_bloom_size_ratio = + static_cast(jmemtable_prefix_bloom_size_ratio); +} + +/* + * Class: org_rocksdb_Options + * Method: experimentalMempurgeThreshold + * Signature: (J)I + */ +jdouble Java_org_rocksdb_Options_experimentalMempurgeThreshold(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->experimental_mempurge_threshold; +} + +/* + * Class: org_rocksdb_Options + * Method: setExperimentalMempurgeThreshold + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setExperimentalMempurgeThreshold( + JNIEnv*, jobject, jlong jhandle, jdouble jexperimental_mempurge_threshold) { + reinterpret_cast(jhandle) + ->experimental_mempurge_threshold = + static_cast(jexperimental_mempurge_threshold); +} + +/* + * Class: org_rocksdb_Options + * Method: memtableWholeKeyFiltering + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_memtableWholeKeyFiltering(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->memtable_whole_key_filtering; +} + +/* + * Class: org_rocksdb_Options + * Method: setMemtableWholeKeyFiltering + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setMemtableWholeKeyFiltering( + JNIEnv*, jobject, jlong jhandle, jboolean jmemtable_whole_key_filtering) { + reinterpret_cast(jhandle) + ->memtable_whole_key_filtering = + static_cast(jmemtable_whole_key_filtering); +} + +/* + * Class: org_rocksdb_Options + * Method: bloomLocality + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_bloomLocality(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->bloom_locality; +} + +/* + * Class: org_rocksdb_Options + * Method: setBloomLocality + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setBloomLocality(JNIEnv*, jobject, jlong jhandle, + jint jbloom_locality) { + reinterpret_cast(jhandle)->bloom_locality = + static_cast(jbloom_locality); +} + +/* + * Class: org_rocksdb_Options + * Method: maxSuccessiveMerges + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxSuccessiveMerges(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_successive_merges; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxSuccessiveMerges + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxSuccessiveMerges( + JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + jmax_successive_merges); + if (s.ok()) { + reinterpret_cast(jhandle) + ->max_successive_merges = jmax_successive_merges; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: optimizeFiltersForHits + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->optimize_filters_for_hits; +} + +/* + * Class: org_rocksdb_Options + * Method: setOptimizeFiltersForHits + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setOptimizeFiltersForHits( + JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) { + reinterpret_cast(jhandle) + ->optimize_filters_for_hits = + static_cast(joptimize_filters_for_hits); +} + +/* + * Class: 
org_rocksdb_Options + * Method: oldDefaults + * Signature: (JII)V + */ +void Java_org_rocksdb_Options_oldDefaults(JNIEnv*, jclass, jlong jhandle, + jint major_version, + jint minor_version) { + reinterpret_cast(jhandle)->OldDefaults( + major_version, minor_version); +} + +/* + * Class: org_rocksdb_Options + * Method: optimizeForSmallDb + * Signature: (J)V + */ +void Java_org_rocksdb_Options_optimizeForSmallDb__J(JNIEnv*, jobject, + jlong jhandle) { + reinterpret_cast(jhandle)->OptimizeForSmallDb(); +} + +/* + * Class: org_rocksdb_Options + * Method: optimizeForSmallDb + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_optimizeForSmallDb__JJ(JNIEnv*, jclass, + jlong jhandle, + jlong cache_handle) { + auto* cache_sptr_ptr = + reinterpret_cast*>( + cache_handle); + auto* options_ptr = reinterpret_cast(jhandle); + auto* cf_options_ptr = + static_cast(options_ptr); + cf_options_ptr->OptimizeForSmallDb(cache_sptr_ptr); +} + +/* + * Class: org_rocksdb_Options + * Method: optimizeForPointLookup + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_optimizeForPointLookup( + JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) { + reinterpret_cast(jhandle) + ->OptimizeForPointLookup(block_cache_size_mb); +} + +/* + * Class: org_rocksdb_Options + * Method: optimizeLevelStyleCompaction + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_optimizeLevelStyleCompaction( + JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) { + reinterpret_cast(jhandle) + ->OptimizeLevelStyleCompaction(memtable_memory_budget); +} + +/* + * Class: org_rocksdb_Options + * Method: optimizeUniversalStyleCompaction + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_optimizeUniversalStyleCompaction( + JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) { + reinterpret_cast(jhandle) + ->OptimizeUniversalStyleCompaction(memtable_memory_budget); +} + +/* + * Class: org_rocksdb_Options + * Method: prepareForBulkLoad + * Signature: (J)V + */ +void Java_org_rocksdb_Options_prepareForBulkLoad(JNIEnv*, jobject, + jlong jhandle) { + reinterpret_cast(jhandle)->PrepareForBulkLoad(); +} + +/* + * Class: org_rocksdb_Options + * Method: memtableHugePageSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_memtableHugePageSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->memtable_huge_page_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setMemtableHugePageSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMemtableHugePageSize( + JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + jmemtable_huge_page_size); + if (s.ok()) { + reinterpret_cast(jhandle) + ->memtable_huge_page_size = jmemtable_huge_page_size; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: softPendingCompactionBytesLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_softPendingCompactionBytesLimit(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->soft_pending_compaction_bytes_limit; +} + +/* + * Class: org_rocksdb_Options + * Method: setSoftPendingCompactionBytesLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setSoftPendingCompactionBytesLimit( + JNIEnv*, jobject, jlong jhandle, + jlong jsoft_pending_compaction_bytes_limit) { + reinterpret_cast(jhandle) + ->soft_pending_compaction_bytes_limit = + static_cast(jsoft_pending_compaction_bytes_limit); +} 
+ +/* + * Class: org_rocksdb_Options + * Method: softHardCompactionBytesLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_hardPendingCompactionBytesLimit(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->hard_pending_compaction_bytes_limit; +} + +/* + * Class: org_rocksdb_Options + * Method: setHardPendingCompactionBytesLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setHardPendingCompactionBytesLimit( + JNIEnv*, jobject, jlong jhandle, + jlong jhard_pending_compaction_bytes_limit) { + reinterpret_cast(jhandle) + ->hard_pending_compaction_bytes_limit = + static_cast(jhard_pending_compaction_bytes_limit); +} + +/* + * Class: org_rocksdb_Options + * Method: level0FileNumCompactionTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_level0FileNumCompactionTrigger(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->level0_file_num_compaction_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevel0FileNumCompactionTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevel0FileNumCompactionTrigger( + JNIEnv*, jobject, jlong jhandle, jint jlevel0_file_num_compaction_trigger) { + reinterpret_cast(jhandle) + ->level0_file_num_compaction_trigger = + static_cast(jlevel0_file_num_compaction_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: level0SlowdownWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_level0SlowdownWritesTrigger(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->level0_slowdown_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevel0SlowdownWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevel0SlowdownWritesTrigger( + JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) { + reinterpret_cast(jhandle) + ->level0_slowdown_writes_trigger = + static_cast(jlevel0_slowdown_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: level0StopWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_level0StopWritesTrigger(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->level0_stop_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevel0StopWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevel0StopWritesTrigger( + JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) { + reinterpret_cast(jhandle) + ->level0_stop_writes_trigger = + static_cast(jlevel0_stop_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBytesForLevelMultiplierAdditional + * Signature: (J)[I + */ +jintArray Java_org_rocksdb_Options_maxBytesForLevelMultiplierAdditional( + JNIEnv* env, jobject, jlong jhandle) { + auto mbflma = reinterpret_cast(jhandle) + ->max_bytes_for_level_multiplier_additional; + + const size_t size = mbflma.size(); + + jint* additionals = new jint[size]; + for (size_t i = 0; i < size; i++) { + additionals[i] = static_cast(mbflma[i]); + } + + jsize jlen = static_cast(size); + jintArray result = env->NewIntArray(jlen); + if (result == nullptr) { + // exception thrown: OutOfMemoryError + delete[] additionals; + return nullptr; + } + + env->SetIntArrayRegion(result, 0, jlen, additionals); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(result); + delete[] additionals; + return nullptr; + } + + delete[] additionals; + + return result; +} + +/* + * Class: org_rocksdb_Options + * Method: 
setMaxBytesForLevelMultiplierAdditional + * Signature: (J[I)V + */ +void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplierAdditional( + JNIEnv* env, jobject, jlong jhandle, + jintArray jmax_bytes_for_level_multiplier_additional) { + jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional); + jint* additionals = env->GetIntArrayElements( + jmax_bytes_for_level_multiplier_additional, nullptr); + if (additionals == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + auto* opt = reinterpret_cast(jhandle); + opt->max_bytes_for_level_multiplier_additional.clear(); + for (jsize i = 0; i < len; i++) { + opt->max_bytes_for_level_multiplier_additional.push_back( + static_cast(additionals[i])); + } + + env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional, + additionals, JNI_ABORT); +} + +/* + * Class: org_rocksdb_Options + * Method: paranoidFileChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_paranoidFileChecks(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->paranoid_file_checks; +} + +/* + * Class: org_rocksdb_Options + * Method: setParanoidFileChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setParanoidFileChecks( + JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) { + reinterpret_cast(jhandle)->paranoid_file_checks = + static_cast(jparanoid_file_checks); +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionPriority + * Signature: (JB)V + */ +void Java_org_rocksdb_Options_setCompactionPriority( + JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) { + auto* opts = reinterpret_cast(jhandle); + opts->compaction_pri = + ROCKSDB_NAMESPACE::CompactionPriorityJni::toCppCompactionPriority( + jcompaction_priority_value); +} + +/* + * Class: org_rocksdb_Options + * Method: compactionPriority + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Options_compactionPriority(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompactionPriorityJni::toJavaCompactionPriority( + opts->compaction_pri); +} + +/* + * Class: org_rocksdb_Options + * Method: setReportBgIoStats + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setReportBgIoStats(JNIEnv*, jobject, + jlong jhandle, + jboolean jreport_bg_io_stats) { + auto* opts = reinterpret_cast(jhandle); + opts->report_bg_io_stats = static_cast(jreport_bg_io_stats); +} + +/* + * Class: org_rocksdb_Options + * Method: reportBgIoStats + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_reportBgIoStats(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->report_bg_io_stats); +} + +/* + * Class: org_rocksdb_Options + * Method: setTtl + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setTtl(JNIEnv*, jobject, jlong jhandle, + jlong jttl) { + auto* opts = reinterpret_cast(jhandle); + opts->ttl = static_cast(jttl); +} + +/* + * Class: org_rocksdb_Options + * Method: ttl + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_ttl(JNIEnv*, jobject, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->ttl); +} + +/* + * Class: org_rocksdb_Options + * Method: setPeriodicCompactionSeconds + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setPeriodicCompactionSeconds( + JNIEnv*, jobject, jlong jhandle, jlong jperiodicCompactionSeconds) { + auto* opts = reinterpret_cast(jhandle); + opts->periodic_compaction_seconds = + static_cast(jperiodicCompactionSeconds); +} 
+ +/* + * Class: org_rocksdb_Options + * Method: periodicCompactionSeconds + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_periodicCompactionSeconds(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->periodic_compaction_seconds); +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionOptionsUniversal + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setCompactionOptionsUniversal( + JNIEnv*, jobject, jlong jhandle, + jlong jcompaction_options_universal_handle) { + auto* opts = reinterpret_cast(jhandle); + auto* opts_uni = + reinterpret_cast( + jcompaction_options_universal_handle); + opts->compaction_options_universal = *opts_uni; +} + +/* + * Class: org_rocksdb_Options + * Method: setCompactionOptionsFIFO + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setCompactionOptionsFIFO( + JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) { + auto* opts = reinterpret_cast(jhandle); + auto* opts_fifo = reinterpret_cast( + jcompaction_options_fifo_handle); + opts->compaction_options_fifo = *opts_fifo; +} + +/* + * Class: org_rocksdb_Options + * Method: setForceConsistencyChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setForceConsistencyChecks( + JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) { + auto* opts = reinterpret_cast(jhandle); + opts->force_consistency_checks = static_cast(jforce_consistency_checks); +} + +/* + * Class: org_rocksdb_Options + * Method: forceConsistencyChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_forceConsistencyChecks(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->force_consistency_checks); +} + +/// BLOB options + +/* + * Class: org_rocksdb_Options + * Method: setEnableBlobFiles + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setEnableBlobFiles(JNIEnv*, jobject, + jlong jhandle, + jboolean jenable_blob_files) { + auto* opts = reinterpret_cast(jhandle); + opts->enable_blob_files = static_cast(jenable_blob_files); +} + +/* + * Class: org_rocksdb_Options + * Method: enableBlobFiles + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_enableBlobFiles(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->enable_blob_files); +} + +/* + * Class: org_rocksdb_Options + * Method: setMinBlobSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMinBlobSize(JNIEnv*, jobject, jlong jhandle, + jlong jmin_blob_size) { + auto* opts = reinterpret_cast(jhandle); + opts->min_blob_size = static_cast(jmin_blob_size); +} + +/* + * Class: org_rocksdb_Options + * Method: minBlobSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_minBlobSize(JNIEnv*, jobject, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->min_blob_size); +} + +/* + * Class: org_rocksdb_Options + * Method: setBlobFileSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setBlobFileSize(JNIEnv*, jobject, jlong jhandle, + jlong jblob_file_size) { + auto* opts = reinterpret_cast(jhandle); + opts->blob_file_size = static_cast(jblob_file_size); +} + +/* + * Class: org_rocksdb_Options + * Method: blobFileSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_blobFileSize(JNIEnv*, jobject, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->blob_file_size); +} + +/* + * Class: org_rocksdb_Options + * Method: setBlobCompressionType + * 
Signature: (JB)V + */ +void Java_org_rocksdb_Options_setBlobCompressionType( + JNIEnv*, jobject, jlong jhandle, jbyte jblob_compression_type_value) { + auto* opts = reinterpret_cast(jhandle); + opts->blob_compression_type = + ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType( + jblob_compression_type_value); +} + +/* + * Class: org_rocksdb_Options + * Method: blobCompressionType + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Options_blobCompressionType(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType( + opts->blob_compression_type); +} + +/* + * Class: org_rocksdb_Options + * Method: setEnableBlobGarbageCollection + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setEnableBlobGarbageCollection( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_blob_garbage_collection) { + auto* opts = reinterpret_cast(jhandle); + opts->enable_blob_garbage_collection = + static_cast(jenable_blob_garbage_collection); +} + +/* + * Class: org_rocksdb_Options + * Method: enableBlobGarbageCollection + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_enableBlobGarbageCollection(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->enable_blob_garbage_collection); +} + +/* + * Class: org_rocksdb_Options + * Method: setBlobGarbageCollectionAgeCutoff + * Signature: (JD)V + */ +void Java_org_rocksdb_Options_setBlobGarbageCollectionAgeCutoff( + JNIEnv*, jobject, jlong jhandle, + jdouble jblob_garbage_collection_age_cutoff) { + auto* opts = reinterpret_cast(jhandle); + opts->blob_garbage_collection_age_cutoff = + static_cast(jblob_garbage_collection_age_cutoff); +} + +/* + * Class: org_rocksdb_Options + * Method: blobGarbageCollectionAgeCutoff + * Signature: (J)D + */ +jdouble Java_org_rocksdb_Options_blobGarbageCollectionAgeCutoff(JNIEnv*, + jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->blob_garbage_collection_age_cutoff); +} + +/* + * Class: org_rocksdb_Options + * Method: setBlobGarbageCollectionForceThreshold + * Signature: (JD)V + */ +void Java_org_rocksdb_Options_setBlobGarbageCollectionForceThreshold( + JNIEnv*, jobject, jlong jhandle, + jdouble jblob_garbage_collection_force_threshold) { + auto* opts = reinterpret_cast(jhandle); + opts->blob_garbage_collection_force_threshold = + static_cast(jblob_garbage_collection_force_threshold); +} + +/* + * Class: org_rocksdb_Options + * Method: blobGarbageCollectionForceThreshold + * Signature: (J)D + */ +jdouble Java_org_rocksdb_Options_blobGarbageCollectionForceThreshold( + JNIEnv*, jobject, jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->blob_garbage_collection_force_threshold); +} + +/* + * Class: org_rocksdb_Options + * Method: setBlobCompactionReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setBlobCompactionReadaheadSize( + JNIEnv*, jobject, jlong jhandle, jlong jblob_compaction_readahead_size) { + auto* opts = reinterpret_cast(jhandle); + opts->blob_compaction_readahead_size = + static_cast(jblob_compaction_readahead_size); +} + +/* + * Class: org_rocksdb_Options + * Method: blobCompactionReadaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_blobCompactionReadaheadSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->blob_compaction_readahead_size); +} + +/* + * Class: org_rocksdb_Options + * Method: 
setBlobFileStartingLevel + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setBlobFileStartingLevel( + JNIEnv*, jobject, jlong jhandle, jint jblob_file_starting_level) { + auto* opts = reinterpret_cast(jhandle); + opts->blob_file_starting_level = jblob_file_starting_level; +} + +/* + * Class: org_rocksdb_Options + * Method: blobFileStartingLevel + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_blobFileStartingLevel(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return static_cast(opts->blob_file_starting_level); +} + +/* + * Class: org_rocksdb_Options + * Method: setPrepopulateBlobCache + * Signature: (JB)V + */ +void Java_org_rocksdb_Options_setPrepopulateBlobCache( + JNIEnv*, jobject, jlong jhandle, jbyte jprepopulate_blob_cache_value) { + auto* opts = reinterpret_cast(jhandle); + opts->prepopulate_blob_cache = + ROCKSDB_NAMESPACE::PrepopulateBlobCacheJni::toCppPrepopulateBlobCache( + jprepopulate_blob_cache_value); +} + +/* + * Class: org_rocksdb_Options + * Method: prepopulateBlobCache + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Options_prepopulateBlobCache(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::PrepopulateBlobCacheJni::toJavaPrepopulateBlobCache( + opts->prepopulate_blob_cache); +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::ColumnFamilyOptions + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: newColumnFamilyOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(JNIEnv*, + jclass) { + auto* op = new ROCKSDB_NAMESPACE::ColumnFamilyOptions(); + return GET_CPLUSPLUS_POINTER(op); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: copyColumnFamilyOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_copyColumnFamilyOptions( + JNIEnv*, jclass, jlong jhandle) { + auto new_opt = new ROCKSDB_NAMESPACE::ColumnFamilyOptions( + *(reinterpret_cast(jhandle))); + return GET_CPLUSPLUS_POINTER(new_opt); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: newColumnFamilyOptionsFromOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptionsFromOptions( + JNIEnv*, jclass, jlong joptions_handle) { + auto new_opt = new ROCKSDB_NAMESPACE::ColumnFamilyOptions( + *reinterpret_cast(joptions_handle)); + return GET_CPLUSPLUS_POINTER(new_opt); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: getColumnFamilyOptionsFromProps + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps__JLjava_lang_String_2( + JNIEnv* env, jclass, jlong cfg_handle, jstring jopt_string) { + const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr); + if (opt_string == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + auto* config_options = + reinterpret_cast(cfg_handle); + auto* cf_options = new ROCKSDB_NAMESPACE::ColumnFamilyOptions(); + ROCKSDB_NAMESPACE::Status status = + ROCKSDB_NAMESPACE::GetColumnFamilyOptionsFromString( + *config_options, ROCKSDB_NAMESPACE::ColumnFamilyOptions(), opt_string, + cf_options); + + env->ReleaseStringUTFChars(jopt_string, opt_string); + + // Check if ColumnFamilyOptions creation was possible. 
+ jlong ret_value = 0; + if (status.ok()) { + ret_value = GET_CPLUSPLUS_POINTER(cf_options); + } else { + // if operation failed the ColumnFamilyOptions need to be deleted + // again to prevent a memory leak. + delete cf_options; + } + return ret_value; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: getColumnFamilyOptionsFromProps + * Signature: (Ljava/util/String;)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps__Ljava_lang_String_2( + JNIEnv* env, jclass, jstring jopt_string) { + const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr); + if (opt_string == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + auto* cf_options = new ROCKSDB_NAMESPACE::ColumnFamilyOptions(); + ROCKSDB_NAMESPACE::Status status = + ROCKSDB_NAMESPACE::GetColumnFamilyOptionsFromString( + ROCKSDB_NAMESPACE::ColumnFamilyOptions(), opt_string, cf_options); + + env->ReleaseStringUTFChars(jopt_string, opt_string); + + // Check if ColumnFamilyOptions creation was possible. + jlong ret_value = 0; + if (status.ok()) { + ret_value = GET_CPLUSPLUS_POINTER(cf_options); + } else { + // if operation failed the ColumnFamilyOptions need to be deleted + // again to prevent a memory leak. + delete cf_options; + } + return ret_value; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(JNIEnv*, jobject, + jlong handle) { + auto* cfo = reinterpret_cast(handle); + assert(cfo != nullptr); + delete cfo; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: oldDefaults + * Signature: (JII)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_oldDefaults(JNIEnv*, jclass, + jlong jhandle, + jint major_version, + jint minor_version) { + reinterpret_cast(jhandle) + ->OldDefaults(major_version, minor_version); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: optimizeForSmallDb + * Signature: (J)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb__J(JNIEnv*, + jobject, + jlong jhandle) { + reinterpret_cast(jhandle) + ->OptimizeForSmallDb(); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: optimizeForSmallDb + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_optimizeForSmallDb__JJ( + JNIEnv*, jclass, jlong jhandle, jlong cache_handle) { + auto* cache_sptr_ptr = + reinterpret_cast*>( + cache_handle); + reinterpret_cast(jhandle) + ->OptimizeForSmallDb(cache_sptr_ptr); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: optimizeForPointLookup + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_optimizeForPointLookup( + JNIEnv*, jobject, jlong jhandle, jlong block_cache_size_mb) { + reinterpret_cast(jhandle) + ->OptimizeForPointLookup(block_cache_size_mb); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: optimizeLevelStyleCompaction + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_optimizeLevelStyleCompaction( + JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) { + reinterpret_cast(jhandle) + ->OptimizeLevelStyleCompaction(memtable_memory_budget); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: optimizeUniversalStyleCompaction + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_optimizeUniversalStyleCompaction( + JNIEnv*, jobject, jlong jhandle, jlong memtable_memory_budget) { + reinterpret_cast(jhandle) + ->OptimizeUniversalStyleCompaction(memtable_memory_budget); +} + +/* + * 
Class: org_rocksdb_ColumnFamilyOptions + * Method: setComparatorHandle + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JI( + JNIEnv*, jobject, jlong jhandle, jint builtinComparator) { + switch (builtinComparator) { + case 1: + reinterpret_cast(jhandle) + ->comparator = ROCKSDB_NAMESPACE::ReverseBytewiseComparator(); + break; + default: + reinterpret_cast(jhandle) + ->comparator = ROCKSDB_NAMESPACE::BytewiseComparator(); + break; + } +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setComparatorHandle + * Signature: (JJB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJB( + JNIEnv*, jobject, jlong jopt_handle, jlong jcomparator_handle, + jbyte jcomparator_type) { + ROCKSDB_NAMESPACE::Comparator* comparator = nullptr; + switch (jcomparator_type) { + // JAVA_COMPARATOR + case 0x0: + comparator = reinterpret_cast( + jcomparator_handle); + break; + + // JAVA_NATIVE_COMPARATOR_WRAPPER + case 0x1: + comparator = + reinterpret_cast(jcomparator_handle); + break; + } + auto* opt = + reinterpret_cast(jopt_handle); + opt->comparator = comparator; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMergeOperatorName + * Signature: (JJjava/lang/String)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName( + JNIEnv* env, jobject, jlong jhandle, jstring jop_name) { + auto* options = + reinterpret_cast(jhandle); + const char* op_name = env->GetStringUTFChars(jop_name, nullptr); + if (op_name == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + options->merge_operator = + ROCKSDB_NAMESPACE::MergeOperators::CreateFromStringId(op_name); + env->ReleaseStringUTFChars(jop_name, op_name); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMergeOperator + * Signature: (JJjava/lang/String)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperator( + JNIEnv*, jobject, jlong jhandle, jlong mergeOperatorHandle) { + reinterpret_cast(jhandle) + ->merge_operator = + *(reinterpret_cast*>( + mergeOperatorHandle)); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionFilterHandle + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterHandle( + JNIEnv*, jobject, jlong jopt_handle, jlong jcompactionfilter_handle) { + reinterpret_cast(jopt_handle) + ->compaction_filter = + reinterpret_cast( + jcompactionfilter_handle); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionFilterFactoryHandle + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterFactoryHandle( + JNIEnv*, jobject, jlong jopt_handle, + jlong jcompactionfilterfactory_handle) { + auto* cff_factory = reinterpret_cast< + std::shared_ptr*>( + jcompactionfilterfactory_handle); + reinterpret_cast(jopt_handle) + ->compaction_filter_factory = *cff_factory; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setWriteBufferSize + * Signature: (JJ)I + */ +void Java_org_rocksdb_ColumnFamilyOptions_setWriteBufferSize( + JNIEnv* env, jobject, jlong jhandle, jlong jwrite_buffer_size) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + jwrite_buffer_size); + if (s.ok()) { + reinterpret_cast(jhandle) + ->write_buffer_size = jwrite_buffer_size; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: writeBufferSize + * Signature: (J)J + */ +jlong 
Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->write_buffer_size; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMaxWriteBufferNumber + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumber( + JNIEnv*, jobject, jlong jhandle, jint jmax_write_buffer_number) { + reinterpret_cast(jhandle) + ->max_write_buffer_number = jmax_write_buffer_number; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: maxWriteBufferNumber + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_write_buffer_number; +} + +/* + * Method: setMemTableFactory + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory( + JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) { + reinterpret_cast(jhandle) + ->memtable_factory.reset( + reinterpret_cast( + jfactory_handle)); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: memTableFactoryName + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName( + JNIEnv* env, jobject, jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + ROCKSDB_NAMESPACE::MemTableRepFactory* tf = opt->memtable_factory.get(); + + // Should never be nullptr. + // Default memtable factory is SkipListFactory + assert(tf); + + // temporarly fix for the historical typo + if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) { + return env->NewStringUTF("HashLinkedListRepFactory"); + } + + return env->NewStringUTF(tf->Name()); +} + +/* + * Method: useFixedLengthPrefixExtractor + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_useFixedLengthPrefixExtractor( + JNIEnv*, jobject, jlong jhandle, jint jprefix_length) { + reinterpret_cast(jhandle) + ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewFixedPrefixTransform( + static_cast(jprefix_length))); +} + +/* + * Method: useCappedPrefixExtractor + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_useCappedPrefixExtractor( + JNIEnv*, jobject, jlong jhandle, jint jprefix_length) { + reinterpret_cast(jhandle) + ->prefix_extractor.reset(ROCKSDB_NAMESPACE::NewCappedPrefixTransform( + static_cast(jprefix_length))); +} + +/* + * Method: setTableFactory + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory( + JNIEnv*, jobject, jlong jhandle, jlong jfactory_handle) { + reinterpret_cast(jhandle) + ->table_factory.reset( + reinterpret_cast(jfactory_handle)); +} + +/* + * Method: setSstPartitionerFactory + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setSstPartitionerFactory( + JNIEnv*, jobject, jlong jhandle, jlong factory_handle) { + auto* options = + reinterpret_cast(jhandle); + auto factory = reinterpret_cast< + std::shared_ptr*>( + factory_handle); + options->sst_partitioner_factory = *factory; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionThreadLimiter + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionThreadLimiter( + JNIEnv*, jclass, jlong jhandle, jlong jlimiter_handle) { + auto* options = + reinterpret_cast(jhandle); + auto* limiter = reinterpret_cast< + std::shared_ptr*>( + jlimiter_handle); + options->compaction_thread_limiter = *limiter; +} + +/* + * Method: tableFactoryName + * Signature: (J)Ljava/lang/String + */ +jstring 
Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(JNIEnv* env, + jobject, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + ROCKSDB_NAMESPACE::TableFactory* tf = opt->table_factory.get(); + + // Should never be nullptr. + // Default memtable factory is SkipListFactory + assert(tf); + + return env->NewStringUTF(tf->Name()); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCfPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCfPaths(JNIEnv* env, jclass, + jlong jhandle, + jobjectArray path_array, + jlongArray size_array) { + auto* options = + reinterpret_cast(jhandle); + jboolean has_exception = JNI_FALSE; + std::vector cf_paths = + rocksdb_convert_cf_paths_from_java_helper(env, path_array, size_array, + &has_exception); + if (JNI_FALSE == has_exception) { + options->cf_paths = std::move(cf_paths); + } +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: cfPathsLen + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_cfPathsLen(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = + reinterpret_cast(jhandle); + return static_cast(opt->cf_paths.size()); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: cfPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_cfPaths(JNIEnv* env, jclass, + jlong jhandle, + jobjectArray jpaths, + jlongArray jtarget_sizes) { + rocksdb_convert_cf_paths_to_java_helper< + ROCKSDB_NAMESPACE::ColumnFamilyOptions>(env, jhandle, jpaths, + jtarget_sizes); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: minWriteBufferNumberToMerge + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_minWriteBufferNumberToMerge( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->min_write_buffer_number_to_merge; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMinWriteBufferNumberToMerge + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMinWriteBufferNumberToMerge( + JNIEnv*, jobject, jlong jhandle, jint jmin_write_buffer_number_to_merge) { + reinterpret_cast(jhandle) + ->min_write_buffer_number_to_merge = + static_cast(jmin_write_buffer_number_to_merge); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: maxWriteBufferNumberToMaintain + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumberToMaintain( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_write_buffer_number_to_maintain; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMaxWriteBufferNumberToMaintain + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain( + JNIEnv*, jobject, jlong jhandle, + jint jmax_write_buffer_number_to_maintain) { + reinterpret_cast(jhandle) + ->max_write_buffer_number_to_maintain = + static_cast(jmax_write_buffer_number_to_maintain); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompressionType + * Signature: (JB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType( + JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) { + auto* cf_opts = + reinterpret_cast(jhandle); + cf_opts->compression = + ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType( + jcompression_type_value); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: compressionType + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ColumnFamilyOptions_compressionType(JNIEnv*, jobject, + 
jlong jhandle) { + auto* cf_opts = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType( + cf_opts->compression); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompressionPerLevel + * Signature: (J[B)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel( + JNIEnv* env, jobject, jlong jhandle, jbyteArray jcompressionLevels) { + auto* options = + reinterpret_cast(jhandle); + auto uptr_compression_levels = + rocksdb_compression_vector_helper(env, jcompressionLevels); + if (!uptr_compression_levels) { + // exception occurred + return; + } + options->compression_per_level = *(uptr_compression_levels.get()); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: compressionPerLevel + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel( + JNIEnv* env, jobject, jlong jhandle) { + auto* cf_options = + reinterpret_cast(jhandle); + return rocksdb_compression_list_helper(env, + cf_options->compression_per_level); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBottommostCompressionType + * Signature: (JB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionType( + JNIEnv*, jobject, jlong jhandle, jbyte jcompression_type_value) { + auto* cf_options = + reinterpret_cast(jhandle); + cf_options->bottommost_compression = + ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType( + jcompression_type_value); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: bottommostCompressionType + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ColumnFamilyOptions_bottommostCompressionType( + JNIEnv*, jobject, jlong jhandle) { + auto* cf_options = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType( + cf_options->bottommost_compression); +} +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBottommostCompressionOptions + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBottommostCompressionOptions( + JNIEnv*, jobject, jlong jhandle, + jlong jbottommost_compression_options_handle) { + auto* cf_options = + reinterpret_cast(jhandle); + auto* bottommost_compression_options = + reinterpret_cast( + jbottommost_compression_options_handle); + cf_options->bottommost_compression_opts = *bottommost_compression_options; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompressionOptions + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompressionOptions( + JNIEnv*, jobject, jlong jhandle, jlong jcompression_options_handle) { + auto* cf_options = + reinterpret_cast(jhandle); + auto* compression_options = + reinterpret_cast( + jcompression_options_handle); + cf_options->compression_opts = *compression_options; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionStyle + * Signature: (JB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionStyle( + JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_style) { + auto* cf_options = + reinterpret_cast(jhandle); + cf_options->compaction_style = + ROCKSDB_NAMESPACE::CompactionStyleJni::toCppCompactionStyle( + jcompaction_style); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: compactionStyle + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle(JNIEnv*, jobject, + jlong jhandle) { + auto* cf_options = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompactionStyleJni::toJavaCompactionStyle( + 
cf_options->compaction_style);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxTableFilesSizeFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxTableFilesSizeFIFO(
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_table_files_size) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->compaction_options_fifo.max_table_files_size =
+      static_cast<uint64_t>(jmax_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxTableFilesSizeFIFO
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxTableFilesSizeFIFO(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->compaction_options_fifo.max_table_files_size;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: numLevels
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_numLevels(JNIEnv*, jobject,
+                                                    jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->num_levels;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setNumLevels
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels(JNIEnv*, jobject,
+                                                       jlong jhandle,
+                                                       jint jnum_levels) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->num_levels = static_cast<int>(jnum_levels);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroFileNumCompactionTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroFileNumCompactionTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_file_num_compaction_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroFileNumCompactionTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroFileNumCompactionTrigger(
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_file_num_compaction_trigger) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_file_num_compaction_trigger =
+      static_cast<int>(jlevel0_file_num_compaction_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroSlowdownWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroSlowdownWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_slowdown_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroSlowdownWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroSlowdownWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_slowdown_writes_trigger =
+      static_cast<int>(jlevel0_slowdown_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroStopWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroStopWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_stop_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroStopWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_stop_writes_trigger =
+      static_cast<int>(jlevel0_stop_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: targetFileSizeBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(JNIEnv*, jobject,
+                                                              jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->target_file_size_base;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTargetFileSizeBase
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeBase(
+    JNIEnv*, jobject, jlong jhandle, jlong jtarget_file_size_base) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->target_file_size_base = static_cast<uint64_t>(jtarget_file_size_base);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: targetFileSizeMultiplier
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeMultiplier(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->target_file_size_multiplier;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTargetFileSizeMultiplier
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeMultiplier(
+    JNIEnv*, jobject, jlong jhandle, jint jtarget_file_size_multiplier) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->target_file_size_multiplier =
+      static_cast<int>(jtarget_file_size_multiplier);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxBytesForLevelBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelBase(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_bytes_for_level_base;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxBytesForLevelBase
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelBase(
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_bytes_for_level_base) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_bytes_for_level_base =
+      static_cast<uint64_t>(jmax_bytes_for_level_base);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelCompactionDynamicLevelBytes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_levelCompactionDynamicLevelBytes(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level_compaction_dynamic_level_bytes;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelCompactionDynamicLevelBytes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelCompactionDynamicLevelBytes(
+    JNIEnv*, jobject, jlong jhandle, jboolean jenable_dynamic_level_bytes) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level_compaction_dynamic_level_bytes = (jenable_dynamic_level_bytes);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxBytesForLevelMultiplier
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplier(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_bytes_for_level_multiplier;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxBytesForLevelMultiplier
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier(
+    JNIEnv*, jobject, jlong jhandle, jdouble jmax_bytes_for_level_multiplier) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_bytes_for_level_multiplier =
+      static_cast<double>(jmax_bytes_for_level_multiplier);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxCompactionBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(JNIEnv*, jobject,
+                                                              jlong jhandle) {
+  return static_cast<jlong>(
+      reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+          ->max_compaction_bytes);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxCompactionBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes(
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_compaction_bytes) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_compaction_bytes = static_cast<uint64_t>(jmax_compaction_bytes);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: arenaBlockSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize(JNIEnv*, jobject,
+                                                          jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->arena_block_size;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setArenaBlockSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setArenaBlockSize(
+    JNIEnv* env, jobject, jlong jhandle, jlong jarena_block_size) {
+  auto s =
+      ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jarena_block_size);
+  if (s.ok()) {
+    reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+        ->arena_block_size = jarena_block_size;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: disableAutoCompactions
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_disableAutoCompactions(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->disable_auto_compactions;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setDisableAutoCompactions
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions(
+    JNIEnv*, jobject, jlong jhandle, jboolean jdisable_auto_compactions) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->disable_auto_compactions =
+      static_cast<bool>(jdisable_auto_compactions);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxSequentialSkipInIterations
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxSequentialSkipInIterations(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_sequential_skip_in_iterations;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxSequentialSkipInIterations
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxSequentialSkipInIterations(
+    JNIEnv*, jobject, jlong jhandle, jlong jmax_sequential_skip_in_iterations) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_sequential_skip_in_iterations =
+      static_cast<int64_t>(jmax_sequential_skip_in_iterations);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: inplaceUpdateSupport
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateSupport(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->inplace_update_support;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setInplaceUpdateSupport
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateSupport(
+    JNIEnv*, jobject, jlong jhandle, jboolean jinplace_update_support) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->inplace_update_support = static_cast<bool>(jinplace_update_support);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: inplaceUpdateNumLocks
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateNumLocks(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->inplace_update_num_locks;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setInplaceUpdateNumLocks
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks(
+    JNIEnv* env, jobject, jlong jhandle, jlong jinplace_update_num_locks) {
+  auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+      jinplace_update_num_locks);
+  if (s.ok()) {
+    reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+        ->inplace_update_num_locks = jinplace_update_num_locks;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
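+// Illustrative Java-side behaviour (a sketch, not part of the upstream
+// sources): setters routed through JniUtil::check_if_jlong_fits_size_t, such
+// as setInplaceUpdateNumLocks above, surface values that do not fit in a
+// size_t as an IllegalArgumentException instead of silently truncating:
+//
+//   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+//     cfOpts.setInplaceUpdateNumLocks(10_000L);  // fits in size_t, accepted
+//     try {
+//       cfOpts.setInplaceUpdateNumLocks(-1L);    // does not fit
+//     } catch (final IllegalArgumentException e) {
+//       // expected: the jlong cannot be represented as size_t
+//     }
+//   }
+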
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: memtablePrefixBloomSizeRatio
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->memtable_prefix_bloom_size_ratio;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMemtablePrefixBloomSizeRatio
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio(
+    JNIEnv*, jobject, jlong jhandle,
+    jdouble jmemtable_prefix_bloom_size_ratio) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->memtable_prefix_bloom_size_ratio =
+      static_cast<double>(jmemtable_prefix_bloom_size_ratio);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: experimentalMempurgeThreshold
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_ColumnFamilyOptions_experimentalMempurgeThreshold(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->experimental_mempurge_threshold;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setExperimentalMempurgeThreshold
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setExperimentalMempurgeThreshold(
+    JNIEnv*, jobject, jlong jhandle, jdouble jexperimental_mempurge_threshold) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->experimental_mempurge_threshold =
+      static_cast<double>(jexperimental_mempurge_threshold);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: memtableWholeKeyFiltering
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_memtableWholeKeyFiltering(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->memtable_whole_key_filtering;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMemtableWholeKeyFiltering
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMemtableWholeKeyFiltering(
+    JNIEnv*, jobject, jlong jhandle, jboolean jmemtable_whole_key_filtering) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->memtable_whole_key_filtering =
+      static_cast<bool>(jmemtable_whole_key_filtering);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: bloomLocality
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(JNIEnv*, jobject,
+                                                        jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->bloom_locality;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setBloomLocality
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setBloomLocality(
+    JNIEnv*, jobject, jlong jhandle, jint jbloom_locality) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->bloom_locality = static_cast<int32_t>(jbloom_locality);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxSuccessiveMerges
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(JNIEnv*, jobject,
+                                                               jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->max_successive_merges;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxSuccessiveMerges
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxSuccessiveMerges(
+    JNIEnv* env, jobject, jlong jhandle, jlong jmax_successive_merges) {
+  auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+      jmax_successive_merges);
+  if (s.ok()) {
+    reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+        ->max_successive_merges = jmax_successive_merges;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeFiltersForHits
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_optimizeFiltersForHits(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->optimize_filters_for_hits;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setOptimizeFiltersForHits
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits(
+    JNIEnv*, jobject, jlong jhandle, jboolean joptimize_filters_for_hits) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->optimize_filters_for_hits =
+      static_cast<bool>(joptimize_filters_for_hits);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: memtableHugePageSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_memtableHugePageSize(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->memtable_huge_page_size;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMemtableHugePageSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMemtableHugePageSize(
+    JNIEnv* env, jobject, jlong jhandle, jlong jmemtable_huge_page_size) {
+  auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+      jmemtable_huge_page_size);
+  if (s.ok()) {
+    reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+        ->memtable_huge_page_size = jmemtable_huge_page_size;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: softPendingCompactionBytesLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_softPendingCompactionBytesLimit(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->soft_pending_compaction_bytes_limit;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setSoftPendingCompactionBytesLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setSoftPendingCompactionBytesLimit(
+    JNIEnv*, jobject, jlong jhandle,
+    jlong jsoft_pending_compaction_bytes_limit) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->soft_pending_compaction_bytes_limit =
+      static_cast<int64_t>(jsoft_pending_compaction_bytes_limit);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: hardPendingCompactionBytesLimit
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_hardPendingCompactionBytesLimit(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->hard_pending_compaction_bytes_limit;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setHardPendingCompactionBytesLimit
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setHardPendingCompactionBytesLimit(
+    JNIEnv*, jobject, jlong jhandle,
+    jlong jhard_pending_compaction_bytes_limit) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->hard_pending_compaction_bytes_limit =
+      static_cast<int64_t>(jhard_pending_compaction_bytes_limit);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: level0FileNumCompactionTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_level0FileNumCompactionTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_file_num_compaction_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevel0FileNumCompactionTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevel0FileNumCompactionTrigger(
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_file_num_compaction_trigger) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_file_num_compaction_trigger =
+      static_cast<int32_t>(jlevel0_file_num_compaction_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: level0SlowdownWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_level0SlowdownWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_slowdown_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevel0SlowdownWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevel0SlowdownWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_slowdown_writes_trigger) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_slowdown_writes_trigger =
+      static_cast<int32_t>(jlevel0_slowdown_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: level0StopWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_level0StopWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_stop_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevel0StopWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevel0StopWritesTrigger(
+    JNIEnv*, jobject, jlong jhandle, jint jlevel0_stop_writes_trigger) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->level0_stop_writes_trigger =
+      static_cast<int32_t>(jlevel0_stop_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxBytesForLevelMultiplierAdditional
+ * Signature: (J)[I
+ */
+jintArray
+Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplierAdditional(
+    JNIEnv* env, jobject, jlong jhandle) {
+  auto mbflma =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+          ->max_bytes_for_level_multiplier_additional;
+
+  const size_t size = mbflma.size();
+
+  jint* additionals = new jint[size];
+  for (size_t i = 0; i < size; i++) {
+    additionals[i] = static_cast<jint>(mbflma[i]);
+  }
+
+  jsize jlen = static_cast<jsize>(size);
+  jintArray result = env->NewIntArray(jlen);
+  if (result == nullptr) {
+    // exception thrown: OutOfMemoryError
+    delete[] additionals;
+    return nullptr;
+  }
+  env->SetIntArrayRegion(result, 0, jlen, additionals);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    env->DeleteLocalRef(result);
+    delete[] additionals;
+    return nullptr;
+  }
+
+  delete[] additionals;
+
+  return result;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxBytesForLevelMultiplierAdditional
+ * Signature: (J[I)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplierAdditional(
+    JNIEnv* env, jobject, jlong jhandle,
+    jintArray jmax_bytes_for_level_multiplier_additional) {
+  jsize len = env->GetArrayLength(jmax_bytes_for_level_multiplier_additional);
+  jint* additionals = env->GetIntArrayElements(
+      jmax_bytes_for_level_multiplier_additional, nullptr);
+  if (additionals == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  auto* cf_opt =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle);
+  cf_opt->max_bytes_for_level_multiplier_additional.clear();
+  for (jsize i = 0; i < len; i++) {
+    cf_opt->max_bytes_for_level_multiplier_additional.push_back(
+        static_cast<int32_t>(additionals[i]));
+  }
+
+  env->ReleaseIntArrayElements(jmax_bytes_for_level_multiplier_additional,
+                               additionals, JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: paranoidFileChecks
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_paranoidFileChecks(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyOptions*>(jhandle)
+      ->paranoid_file_checks;
+}
+
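+// Illustrative Java-side usage (a sketch, not part of the upstream sources):
+// the maxBytesForLevelMultiplierAdditional pair above marshals a C++
+// std::vector<int> to and from a Java int[], so from Java one would write
+// roughly:
+//
+//   try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+//     cfOpts.setMaxBytesForLevelMultiplierAdditional(new int[]{1, 2, 4});
+//     final int[] additionals =
+//         cfOpts.maxBytesForLevelMultiplierAdditional();
+//     assert java.util.Arrays.equals(additionals, new int[]{1, 2, 4});
+//   }
+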
+/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setParanoidFileChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setParanoidFileChecks( + JNIEnv*, jobject, jlong jhandle, jboolean jparanoid_file_checks) { + reinterpret_cast(jhandle) + ->paranoid_file_checks = static_cast(jparanoid_file_checks); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionPriority + * Signature: (JB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionPriority( + JNIEnv*, jobject, jlong jhandle, jbyte jcompaction_priority_value) { + auto* cf_opts = + reinterpret_cast(jhandle); + cf_opts->compaction_pri = + ROCKSDB_NAMESPACE::CompactionPriorityJni::toCppCompactionPriority( + jcompaction_priority_value); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: compactionPriority + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionPriority(JNIEnv*, jobject, + jlong jhandle) { + auto* cf_opts = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompactionPriorityJni::toJavaCompactionPriority( + cf_opts->compaction_pri); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setReportBgIoStats + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setReportBgIoStats( + JNIEnv*, jobject, jlong jhandle, jboolean jreport_bg_io_stats) { + auto* cf_opts = + reinterpret_cast(jhandle); + cf_opts->report_bg_io_stats = static_cast(jreport_bg_io_stats); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: reportBgIoStats + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_reportBgIoStats(JNIEnv*, jobject, + jlong jhandle) { + auto* cf_opts = + reinterpret_cast(jhandle); + return static_cast(cf_opts->report_bg_io_stats); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setTtl + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setTtl(JNIEnv*, jobject, + jlong jhandle, jlong jttl) { + auto* cf_opts = + reinterpret_cast(jhandle); + cf_opts->ttl = static_cast(jttl); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: ttl + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL +Java_org_rocksdb_ColumnFamilyOptions_ttl(JNIEnv*, jobject, jlong jhandle) { + auto* cf_opts = + reinterpret_cast(jhandle); + return static_cast(cf_opts->ttl); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setPeriodicCompactionSeconds + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setPeriodicCompactionSeconds( + JNIEnv*, jobject, jlong jhandle, jlong jperiodicCompactionSeconds) { + auto* cf_opts = + reinterpret_cast(jhandle); + cf_opts->periodic_compaction_seconds = + static_cast(jperiodicCompactionSeconds); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: periodicCompactionSeconds + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL +Java_org_rocksdb_ColumnFamilyOptions_periodicCompactionSeconds(JNIEnv*, jobject, + jlong jhandle) { + auto* cf_opts = + reinterpret_cast(jhandle); + return static_cast(cf_opts->periodic_compaction_seconds); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionOptionsUniversal + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsUniversal( + JNIEnv*, jobject, jlong jhandle, + jlong jcompaction_options_universal_handle) { + auto* cf_opts = + reinterpret_cast(jhandle); + auto* opts_uni = + reinterpret_cast( + jcompaction_options_universal_handle); + cf_opts->compaction_options_universal = *opts_uni; +} + +/* + * Class: 
org_rocksdb_ColumnFamilyOptions + * Method: setCompactionOptionsFIFO + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionOptionsFIFO( + JNIEnv*, jobject, jlong jhandle, jlong jcompaction_options_fifo_handle) { + auto* cf_opts = + reinterpret_cast(jhandle); + auto* opts_fifo = reinterpret_cast( + jcompaction_options_fifo_handle); + cf_opts->compaction_options_fifo = *opts_fifo; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setForceConsistencyChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setForceConsistencyChecks( + JNIEnv*, jobject, jlong jhandle, jboolean jforce_consistency_checks) { + auto* cf_opts = + reinterpret_cast(jhandle); + cf_opts->force_consistency_checks = + static_cast(jforce_consistency_checks); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: forceConsistencyChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_forceConsistencyChecks( + JNIEnv*, jobject, jlong jhandle) { + auto* cf_opts = + reinterpret_cast(jhandle); + return static_cast(cf_opts->force_consistency_checks); +} + +/// BLOB options + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setEnableBlobFiles + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setEnableBlobFiles( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_blob_files) { + auto* opts = + reinterpret_cast(jhandle); + opts->enable_blob_files = static_cast(jenable_blob_files); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: enableBlobFiles + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_enableBlobFiles(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->enable_blob_files); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMinBlobSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMinBlobSize(JNIEnv*, jobject, + jlong jhandle, + jlong jmin_blob_size) { + auto* opts = + reinterpret_cast(jhandle); + opts->min_blob_size = static_cast(jmin_blob_size); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: minBlobSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_minBlobSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->min_blob_size); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBlobFileSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBlobFileSize( + JNIEnv*, jobject, jlong jhandle, jlong jblob_file_size) { + auto* opts = + reinterpret_cast(jhandle); + opts->blob_file_size = static_cast(jblob_file_size); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: blobFileSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_blobFileSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->blob_file_size); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBlobCompressionType + * Signature: (JB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBlobCompressionType( + JNIEnv*, jobject, jlong jhandle, jbyte jblob_compression_type_value) { + auto* opts = + reinterpret_cast(jhandle); + opts->blob_compression_type = + ROCKSDB_NAMESPACE::CompressionTypeJni::toCppCompressionType( + jblob_compression_type_value); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: blobCompressionType + * Signature: (J)B + */ +jbyte 
Java_org_rocksdb_ColumnFamilyOptions_blobCompressionType(JNIEnv*, jobject, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::CompressionTypeJni::toJavaCompressionType( + opts->blob_compression_type); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setEnableBlobGarbageCollection + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setEnableBlobGarbageCollection( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_blob_garbage_collection) { + auto* opts = + reinterpret_cast(jhandle); + opts->enable_blob_garbage_collection = + static_cast(jenable_blob_garbage_collection); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: enableBlobGarbageCollection + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_enableBlobGarbageCollection( + JNIEnv*, jobject, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->enable_blob_garbage_collection); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBlobGarbageCollectionAgeCutoff + * Signature: (JD)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBlobGarbageCollectionAgeCutoff( + JNIEnv*, jobject, jlong jhandle, + jdouble jblob_garbage_collection_age_cutoff) { + auto* opts = + reinterpret_cast(jhandle); + opts->blob_garbage_collection_age_cutoff = + static_cast(jblob_garbage_collection_age_cutoff); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: blobGarbageCollectionAgeCutoff + * Signature: (J)D + */ +jdouble Java_org_rocksdb_ColumnFamilyOptions_blobGarbageCollectionAgeCutoff( + JNIEnv*, jobject, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->blob_garbage_collection_age_cutoff); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBlobGarbageCollectionForceThreshold + * Signature: (JD)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBlobGarbageCollectionForceThreshold( + JNIEnv*, jobject, jlong jhandle, + jdouble jblob_garbage_collection_force_threshold) { + auto* opts = + reinterpret_cast(jhandle); + opts->blob_garbage_collection_force_threshold = + static_cast(jblob_garbage_collection_force_threshold); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: blobGarbageCollectionForceThreshold + * Signature: (J)D + */ +jdouble +Java_org_rocksdb_ColumnFamilyOptions_blobGarbageCollectionForceThreshold( + JNIEnv*, jobject, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->blob_garbage_collection_force_threshold); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBlobCompactionReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBlobCompactionReadaheadSize( + JNIEnv*, jobject, jlong jhandle, jlong jblob_compaction_readahead_size) { + auto* opts = + reinterpret_cast(jhandle); + opts->blob_compaction_readahead_size = + static_cast(jblob_compaction_readahead_size); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: blobCompactionReadaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_blobCompactionReadaheadSize( + JNIEnv*, jobject, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->blob_compaction_readahead_size); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setBlobFileStartingLevel + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setBlobFileStartingLevel( + JNIEnv*, jobject, jlong jhandle, jint jblob_file_starting_level) { + 
auto* opts = + reinterpret_cast(jhandle); + opts->blob_file_starting_level = jblob_file_starting_level; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: blobFileStartingLevel + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_blobFileStartingLevel(JNIEnv*, + jobject, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return static_cast(opts->blob_file_starting_level); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setPrepopulateBlobCache + * Signature: (JB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setPrepopulateBlobCache( + JNIEnv*, jobject, jlong jhandle, jbyte jprepopulate_blob_cache_value) { + auto* opts = + reinterpret_cast(jhandle); + opts->prepopulate_blob_cache = + ROCKSDB_NAMESPACE::PrepopulateBlobCacheJni::toCppPrepopulateBlobCache( + jprepopulate_blob_cache_value); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: prepopulateBlobCache + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ColumnFamilyOptions_prepopulateBlobCache(JNIEnv*, + jobject, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::PrepopulateBlobCacheJni::toJavaPrepopulateBlobCache( + opts->prepopulate_blob_cache); +} + +///////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DBOptions + +/* + * Class: org_rocksdb_DBOptions + * Method: newDBOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv*, jclass) { + auto* dbop = new ROCKSDB_NAMESPACE::DBOptions(); + return GET_CPLUSPLUS_POINTER(dbop); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: copyDBOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_copyDBOptions(JNIEnv*, jclass, jlong jhandle) { + auto new_opt = new ROCKSDB_NAMESPACE::DBOptions( + *(reinterpret_cast(jhandle))); + return GET_CPLUSPLUS_POINTER(new_opt); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: newDBOptionsFromOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_newDBOptionsFromOptions( + JNIEnv*, jclass, jlong joptions_handle) { + auto new_opt = new ROCKSDB_NAMESPACE::DBOptions( + *reinterpret_cast(joptions_handle)); + return GET_CPLUSPLUS_POINTER(new_opt); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: getDBOptionsFromProps + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps__JLjava_lang_String_2( + JNIEnv* env, jclass, jlong config_handle, jstring jopt_string) { + const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr); + if (opt_string == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + auto* config_options = + reinterpret_cast(config_handle); + auto* db_options = new ROCKSDB_NAMESPACE::DBOptions(); + ROCKSDB_NAMESPACE::Status status = ROCKSDB_NAMESPACE::GetDBOptionsFromString( + *config_options, ROCKSDB_NAMESPACE::DBOptions(), opt_string, db_options); + + env->ReleaseStringUTFChars(jopt_string, opt_string); + + // Check if DBOptions creation was possible. + jlong ret_value = 0; + if (status.ok()) { + ret_value = GET_CPLUSPLUS_POINTER(db_options); + } else { + // if operation failed the DBOptions need to be deleted + // again to prevent a memory leak. 
+    delete db_options;
+  }
+  return ret_value;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: getDBOptionsFromProps
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps__Ljava_lang_String_2(
+    JNIEnv* env, jclass, jstring jopt_string) {
+  const char* opt_string = env->GetStringUTFChars(jopt_string, nullptr);
+  if (opt_string == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return 0;
+  }
+
+  auto* db_options = new ROCKSDB_NAMESPACE::DBOptions();
+  ROCKSDB_NAMESPACE::Status status = ROCKSDB_NAMESPACE::GetDBOptionsFromString(
+      ROCKSDB_NAMESPACE::DBOptions(), opt_string, db_options);
+
+  env->ReleaseStringUTFChars(jopt_string, opt_string);
+
+  // Check if DBOptions creation was possible.
+  jlong ret_value = 0;
+  if (status.ok()) {
+    ret_value = GET_CPLUSPLUS_POINTER(db_options);
+  } else {
+    // if operation failed the DBOptions need to be deleted
+    // again to prevent a memory leak.
+    delete db_options;
+  }
+  return ret_value;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_DBOptions_disposeInternal(JNIEnv*, jobject,
+                                                jlong handle) {
+  auto* dbo = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(handle);
+  assert(dbo != nullptr);
+  delete dbo;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: optimizeForSmallDb
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_DBOptions_optimizeForSmallDb(JNIEnv*, jobject,
+                                                   jlong jhandle) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->OptimizeForSmallDb();
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setEnv
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setEnv(JNIEnv*, jobject, jlong jhandle,
+                                       jlong jenv_handle) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->env =
+      reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setIncreaseParallelism
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setIncreaseParallelism(JNIEnv*, jobject,
+                                                       jlong jhandle,
+                                                       jint totalThreads) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->IncreaseParallelism(
+      static_cast<int>(totalThreads));
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setCreateIfMissing
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setCreateIfMissing(JNIEnv*, jobject,
+                                                   jlong jhandle,
+                                                   jboolean flag) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->create_if_missing =
+      flag;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: createIfMissing
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_createIfMissing(JNIEnv*, jobject,
+                                                    jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->create_if_missing;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setCreateMissingColumnFamilies
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(JNIEnv*, jobject,
+                                                               jlong jhandle,
+                                                               jboolean flag) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->create_missing_column_families = flag;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: createMissingColumnFamilies
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_createMissingColumnFamilies(JNIEnv*,
+                                                                jobject,
+                                                                jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->create_missing_column_families;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setErrorIfExists
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setErrorIfExists(JNIEnv*, jobject,
+                                                 jlong jhandle,
+                                                 jboolean error_if_exists) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->error_if_exists =
+      static_cast<bool>(error_if_exists);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: errorIfExists
+ * Signature: (J)Z
+ */
+jboolean
Java_org_rocksdb_DBOptions_errorIfExists(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->error_if_exists; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setParanoidChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setParanoidChecks(JNIEnv*, jobject, + jlong jhandle, + jboolean paranoid_checks) { + reinterpret_cast(jhandle)->paranoid_checks = + static_cast(paranoid_checks); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: paranoidChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_paranoidChecks(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->paranoid_checks; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setRateLimiter + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setRateLimiter(JNIEnv*, jobject, jlong jhandle, + jlong jrate_limiter_handle) { + std::shared_ptr* pRateLimiter = + reinterpret_cast*>( + jrate_limiter_handle); + reinterpret_cast(jhandle)->rate_limiter = + *pRateLimiter; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setSstFileManager + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setSstFileManager( + JNIEnv*, jobject, jlong jhandle, jlong jsst_file_manager_handle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jsst_file_manager_handle); + reinterpret_cast(jhandle)->sst_file_manager = + *sptr_sst_file_manager; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setLogger + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setLogger(JNIEnv*, jobject, jlong jhandle, + jlong jlogger_handle) { + std::shared_ptr* pLogger = + reinterpret_cast*>( + jlogger_handle); + reinterpret_cast(jhandle)->info_log = *pLogger; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setInfoLogLevel + * Signature: (JB)V + */ +void Java_org_rocksdb_DBOptions_setInfoLogLevel(JNIEnv*, jobject, jlong jhandle, + jbyte jlog_level) { + reinterpret_cast(jhandle)->info_log_level = + static_cast(jlog_level); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: infoLogLevel + * Signature: (J)B + */ +jbyte Java_org_rocksdb_DBOptions_infoLogLevel(JNIEnv*, jobject, jlong jhandle) { + return static_cast( + reinterpret_cast(jhandle)->info_log_level); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxTotalWalSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setMaxTotalWalSize(JNIEnv*, jobject, + jlong jhandle, + jlong jmax_total_wal_size) { + reinterpret_cast(jhandle)->max_total_wal_size = + static_cast(jmax_total_wal_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxTotalWalSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_maxTotalWalSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_total_wal_size; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxOpenFiles + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxOpenFiles(JNIEnv*, jobject, jlong jhandle, + jint max_open_files) { + reinterpret_cast(jhandle)->max_open_files = + static_cast(max_open_files); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxOpenFiles + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxOpenFiles(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_open_files; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxFileOpeningThreads + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxFileOpeningThreads( + JNIEnv*, jobject, jlong jhandle, jint jmax_file_opening_threads) { + reinterpret_cast(jhandle) + 
->max_file_opening_threads = static_cast(jmax_file_opening_threads); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxFileOpeningThreads + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxFileOpeningThreads(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_file_opening_threads); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setStatistics + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setStatistics(JNIEnv*, jobject, jlong jhandle, + jlong jstatistics_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* pSptr = + reinterpret_cast*>( + jstatistics_handle); + opt->statistics = *pSptr; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: statistics + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_statistics(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + std::shared_ptr sptr = opt->statistics; + if (sptr == nullptr) { + return 0; + } else { + std::shared_ptr* pSptr = + new std::shared_ptr(sptr); + return GET_CPLUSPLUS_POINTER(pSptr); + } +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setUseFsync + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setUseFsync(JNIEnv*, jobject, jlong jhandle, + jboolean use_fsync) { + reinterpret_cast(jhandle)->use_fsync = + static_cast(use_fsync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: useFsync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_useFsync(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->use_fsync; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDbPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_DBOptions_setDbPaths(JNIEnv* env, jobject, jlong jhandle, + jobjectArray jpaths, + jlongArray jtarget_sizes) { + std::vector db_paths; + jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, nullptr); + if (ptr_jtarget_size == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + jboolean has_exception = JNI_FALSE; + const jsize len = env->GetArrayLength(jpaths); + for (jsize i = 0; i < len; i++) { + jobject jpath = + reinterpret_cast(env->GetObjectArrayElement(jpaths, i)); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + std::string path = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, static_cast(jpath), &has_exception); + env->DeleteLocalRef(jpath); + + if (has_exception == JNI_TRUE) { + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + return; + } + + jlong jtarget_size = ptr_jtarget_size[i]; + + db_paths.push_back( + ROCKSDB_NAMESPACE::DbPath(path, static_cast(jtarget_size))); + } + + env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT); + + auto* opt = reinterpret_cast(jhandle); + opt->db_paths = db_paths; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dbPathsLen + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_dbPathsLen(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->db_paths.size()); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dbPaths + * Signature: (J[Ljava/lang/String;[J)V + */ +void Java_org_rocksdb_DBOptions_dbPaths(JNIEnv* env, jobject, jlong jhandle, + jobjectArray jpaths, + jlongArray jtarget_sizes) { + jboolean is_copy; + jlong* ptr_jtarget_size = env->GetLongArrayElements(jtarget_sizes, &is_copy); + if (ptr_jtarget_size 
== nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+  const jsize len = env->GetArrayLength(jpaths);
+  for (jsize i = 0; i < len; i++) {
+    ROCKSDB_NAMESPACE::DbPath db_path = opt->db_paths[i];
+
+    jstring jpath = env->NewStringUTF(db_path.path.c_str());
+    if (jpath == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+    env->SetObjectArrayElement(jpaths, i, jpath);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      env->DeleteLocalRef(jpath);
+      env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size, JNI_ABORT);
+      return;
+    }
+
+    ptr_jtarget_size[i] = static_cast<jlong>(db_path.target_size);
+  }
+
+  env->ReleaseLongArrayElements(jtarget_sizes, ptr_jtarget_size,
+                                is_copy == JNI_TRUE ? 0 : JNI_ABORT);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDbLogDir
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_DBOptions_setDbLogDir(JNIEnv* env, jobject, jlong jhandle,
+                                            jstring jdb_log_dir) {
+  const char* log_dir = env->GetStringUTFChars(jdb_log_dir, nullptr);
+  if (log_dir == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->db_log_dir.assign(
+      log_dir);
+  env->ReleaseStringUTFChars(jdb_log_dir, log_dir);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: dbLogDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_DBOptions_dbLogDir(JNIEnv* env, jobject,
+                                            jlong jhandle) {
+  return env->NewStringUTF(
+      reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+          ->db_log_dir.c_str());
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalDir
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_DBOptions_setWalDir(JNIEnv* env, jobject, jlong jhandle,
+                                          jstring jwal_dir) {
+  const char* wal_dir = env->GetStringUTFChars(jwal_dir, nullptr);
+  if (wal_dir == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->wal_dir.assign(
+      wal_dir);
+  env->ReleaseStringUTFChars(jwal_dir, wal_dir);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walDir
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_DBOptions_walDir(JNIEnv* env, jobject,
+                                          jlong jhandle) {
+  return env->NewStringUTF(
+      reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+          ->wal_dir.c_str());
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setDeleteObsoleteFilesPeriodMicros
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setDeleteObsoleteFilesPeriodMicros(
+    JNIEnv*, jobject, jlong jhandle, jlong micros) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->delete_obsolete_files_period_micros = static_cast<int64_t>(micros);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: deleteObsoleteFilesPeriodMicros
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_deleteObsoleteFilesPeriodMicros(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->delete_obsolete_files_period_micros;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxBackgroundCompactions
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions(JNIEnv*, jobject,
+                                                            jlong jhandle,
+                                                            jint max) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->max_background_compactions = static_cast<int>(max);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxBackgroundCompactions
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions(JNIEnv*, jobject,
+                                                         jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->max_background_compactions;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxSubcompactions
+ * Signature:
(JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxSubcompactions(JNIEnv*, jobject, + jlong jhandle, jint max) { + reinterpret_cast(jhandle)->max_subcompactions = + static_cast(max); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxSubcompactions + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxSubcompactions(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_subcompactions; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxBackgroundFlushes + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxBackgroundFlushes( + JNIEnv*, jobject, jlong jhandle, jint max_background_flushes) { + reinterpret_cast(jhandle) + ->max_background_flushes = static_cast(max_background_flushes); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxBackgroundFlushes + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxBackgroundFlushes(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_background_flushes; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxBackgroundJobs + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxBackgroundJobs(JNIEnv*, jobject, + jlong jhandle, + jint max_background_jobs) { + reinterpret_cast(jhandle) + ->max_background_jobs = static_cast(max_background_jobs); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxBackgroundJobs + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxBackgroundJobs(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_background_jobs; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxLogFileSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setMaxLogFileSize(JNIEnv* env, jobject, + jlong jhandle, + jlong max_log_file_size) { + auto s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(max_log_file_size); + if (s.ok()) { + reinterpret_cast(jhandle) + ->max_log_file_size = max_log_file_size; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxLogFileSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_maxLogFileSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->max_log_file_size; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setLogFileTimeToRoll + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setLogFileTimeToRoll( + JNIEnv* env, jobject, jlong jhandle, jlong log_file_time_to_roll) { + auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t( + log_file_time_to_roll); + if (s.ok()) { + reinterpret_cast(jhandle) + ->log_file_time_to_roll = log_file_time_to_roll; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_DBOptions + * Method: logFileTimeToRoll + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->log_file_time_to_roll; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setKeepLogFileNum + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setKeepLogFileNum(JNIEnv* env, jobject, + jlong jhandle, + jlong keep_log_file_num) { + auto s = + ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(keep_log_file_num); + if (s.ok()) { + reinterpret_cast(jhandle) + ->keep_log_file_num = keep_log_file_num; + } else { + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_DBOptions + * Method: 
keepLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_keepLogFileNum(JNIEnv*, jobject,
+                                                jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->keep_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setRecycleLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setRecycleLogFileNum(
+    JNIEnv* env, jobject, jlong jhandle, jlong recycle_log_file_num) {
+  auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+      recycle_log_file_num);
+  if (s.ok()) {
+    reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+        ->recycle_log_file_num = recycle_log_file_num;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: recycleLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(JNIEnv*, jobject,
+                                                   jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->recycle_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxManifestFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxManifestFileSize(
+    JNIEnv*, jobject, jlong jhandle, jlong max_manifest_file_size) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->max_manifest_file_size = static_cast<int64_t>(max_manifest_file_size);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxManifestFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(JNIEnv*, jobject,
+                                                     jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->max_manifest_file_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setTableCacheNumshardbits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setTableCacheNumshardbits(
+    JNIEnv*, jobject, jlong jhandle, jint table_cache_numshardbits) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->table_cache_numshardbits = static_cast<int>(table_cache_numshardbits);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: tableCacheNumshardbits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(JNIEnv*, jobject,
+                                                       jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->table_cache_numshardbits;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalTtlSeconds
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalTtlSeconds(JNIEnv*, jobject,
+                                                 jlong jhandle,
+                                                 jlong WAL_ttl_seconds) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->WAL_ttl_seconds =
+      static_cast<int64_t>(WAL_ttl_seconds);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walTtlSeconds
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_walTtlSeconds(JNIEnv*, jobject,
+                                               jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->WAL_ttl_seconds;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalSizeLimitMB
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(JNIEnv*, jobject,
+                                                  jlong jhandle,
+                                                  jlong WAL_size_limit_MB) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->WAL_size_limit_MB =
+      static_cast<int64_t>(WAL_size_limit_MB);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walSizeLimitMB
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(JNIEnv*, jobject,
+                                                jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->WAL_size_limit_MB;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxWriteBatchGroupSizeBytes
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxWriteBatchGroupSizeBytes(
+    JNIEnv*, jclass, jlong jhandle, jlong jmax_write_batch_group_size_bytes) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+  opt->max_write_batch_group_size_bytes =
+      static_cast<uint64_t>(jmax_write_batch_group_size_bytes);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxWriteBatchGroupSizeBytes
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_maxWriteBatchGroupSizeBytes(JNIEnv*, jclass,
+                                                             jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+  return static_cast<jlong>(opt->max_write_batch_group_size_bytes);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setManifestPreallocationSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setManifestPreallocationSize(
+    JNIEnv* env, jobject, jlong jhandle, jlong preallocation_size) {
+  auto s = ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(
+      preallocation_size);
+  if (s.ok()) {
+    reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+        ->manifest_preallocation_size = preallocation_size;
+  } else {
+    ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: manifestPreallocationSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(JNIEnv*, jobject,
+                                                           jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->manifest_preallocation_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: useDirectReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_useDirectReads(JNIEnv*, jobject,
+                                                   jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->use_direct_reads;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setUseDirectReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setUseDirectReads(JNIEnv*, jobject,
+                                                  jlong jhandle,
+                                                  jboolean use_direct_reads) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->use_direct_reads =
+      static_cast<bool>(use_direct_reads);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: useDirectIoForFlushAndCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_useDirectIoForFlushAndCompaction(
+    JNIEnv*, jobject, jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->use_direct_io_for_flush_and_compaction;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setUseDirectIoForFlushAndCompaction
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setUseDirectIoForFlushAndCompaction(
+    JNIEnv*, jobject, jlong jhandle,
+    jboolean use_direct_io_for_flush_and_compaction) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->use_direct_io_for_flush_and_compaction =
+      static_cast<bool>(use_direct_io_for_flush_and_compaction);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowFAllocate
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowFAllocate(JNIEnv*, jobject,
+                                                  jlong jhandle,
+                                                  jboolean jallow_fallocate) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->allow_fallocate =
+      static_cast<bool>(jallow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowFAllocate
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowFAllocate(JNIEnv*, jobject,
+                                                   jlong jhandle) {
+  auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
+  return static_cast<jboolean>(opt->allow_fallocate);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowMmapReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowMmapReads(JNIEnv*, jobject,
+                                                  jlong jhandle,
+                                                  jboolean allow_mmap_reads) {
+  reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)->allow_mmap_reads =
+      static_cast<bool>(allow_mmap_reads);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowMmapReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowMmapReads(JNIEnv*, jobject,
+                                                   jlong jhandle) {
+  return reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle)
+      ->allow_mmap_reads;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowMmapWrites
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowMmapWrites(JNIEnv*, jobject,
+ jlong jhandle, + jboolean allow_mmap_writes) { + reinterpret_cast(jhandle)->allow_mmap_writes = + static_cast(allow_mmap_writes); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allowMmapWrites + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allowMmapWrites(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->allow_mmap_writes; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setIsFdCloseOnExec + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setIsFdCloseOnExec( + JNIEnv*, jobject, jlong jhandle, jboolean is_fd_close_on_exec) { + reinterpret_cast(jhandle) + ->is_fd_close_on_exec = static_cast(is_fd_close_on_exec); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: isFdCloseOnExec + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->is_fd_close_on_exec; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setStatsDumpPeriodSec + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setStatsDumpPeriodSec( + JNIEnv*, jobject, jlong jhandle, jint jstats_dump_period_sec) { + reinterpret_cast(jhandle) + ->stats_dump_period_sec = + static_cast(jstats_dump_period_sec); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: statsDumpPeriodSec + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->stats_dump_period_sec; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setStatsPersistPeriodSec + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setStatsPersistPeriodSec( + JNIEnv*, jobject, jlong jhandle, jint jstats_persist_period_sec) { + reinterpret_cast(jhandle) + ->stats_persist_period_sec = + static_cast(jstats_persist_period_sec); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: statsPersistPeriodSec + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_statsPersistPeriodSec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->stats_persist_period_sec; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setStatsHistoryBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setStatsHistoryBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jstats_history_buffer_size) { + reinterpret_cast(jhandle) + ->stats_history_buffer_size = + static_cast(jstats_history_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: statsHistoryBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_statsHistoryBufferSize(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->stats_history_buffer_size; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAdviseRandomOnOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAdviseRandomOnOpen( + JNIEnv*, jobject, jlong jhandle, jboolean advise_random_on_open) { + reinterpret_cast(jhandle) + ->advise_random_on_open = static_cast(advise_random_on_open); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: adviseRandomOnOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->advise_random_on_open; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDbWriteBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setDbWriteBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jdb_write_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->db_write_buffer_size = 
static_cast(jdb_write_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWriteBufferManager + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWriteBufferManager( + JNIEnv*, jobject, jlong jdb_options_handle, + jlong jwrite_buffer_manager_handle) { + auto* write_buffer_manager = + reinterpret_cast*>( + jwrite_buffer_manager_handle); + reinterpret_cast(jdb_options_handle) + ->write_buffer_manager = *write_buffer_manager; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dbWriteBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_dbWriteBufferSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->db_write_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAccessHintOnCompactionStart + * Signature: (JB)V + */ +void Java_org_rocksdb_DBOptions_setAccessHintOnCompactionStart( + JNIEnv*, jobject, jlong jhandle, jbyte jaccess_hint_value) { + auto* opt = reinterpret_cast(jhandle); + opt->access_hint_on_compaction_start = + ROCKSDB_NAMESPACE::AccessHintJni::toCppAccessHint(jaccess_hint_value); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: accessHintOnCompactionStart + * Signature: (J)B + */ +jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::AccessHintJni::toJavaAccessHint( + opt->access_hint_on_compaction_start); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setCompactionReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setCompactionReadaheadSize( + JNIEnv*, jobject, jlong jhandle, jlong jcompaction_readahead_size) { + auto* opt = reinterpret_cast(jhandle); + opt->compaction_readahead_size = + static_cast(jcompaction_readahead_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: compactionReadaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_compactionReadaheadSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->compaction_readahead_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setRandomAccessMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setRandomAccessMaxBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jrandom_access_max_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->random_access_max_buffer_size = + static_cast(jrandom_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: randomAccessMaxBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_randomAccessMaxBufferSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->random_access_max_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWritableFileMaxBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWritableFileMaxBufferSize( + JNIEnv*, jobject, jlong jhandle, jlong jwritable_file_max_buffer_size) { + auto* opt = reinterpret_cast(jhandle); + opt->writable_file_max_buffer_size = + static_cast(jwritable_file_max_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: writableFileMaxBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_writableFileMaxBufferSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->writable_file_max_buffer_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setUseAdaptiveMutex + * 
Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setUseAdaptiveMutex( + JNIEnv*, jobject, jlong jhandle, jboolean use_adaptive_mutex) { + reinterpret_cast(jhandle)->use_adaptive_mutex = + static_cast(use_adaptive_mutex); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: useAdaptiveMutex + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->use_adaptive_mutex; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setBytesPerSync(JNIEnv*, jobject, jlong jhandle, + jlong bytes_per_sync) { + reinterpret_cast(jhandle)->bytes_per_sync = + static_cast(bytes_per_sync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: bytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_bytesPerSync(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->bytes_per_sync; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWalBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWalBytesPerSync(JNIEnv*, jobject, + jlong jhandle, + jlong jwal_bytes_per_sync) { + reinterpret_cast(jhandle)->wal_bytes_per_sync = + static_cast(jwal_bytes_per_sync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: walBytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_walBytesPerSync(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->wal_bytes_per_sync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setStrictBytesPerSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setStrictBytesPerSync( + JNIEnv*, jobject, jlong jhandle, jboolean jstrict_bytes_per_sync) { + reinterpret_cast(jhandle) + ->strict_bytes_per_sync = jstrict_bytes_per_sync == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: strictBytesPerSync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_strictBytesPerSync(JNIEnv*, jobject, + jlong jhandle) { + return static_cast( + reinterpret_cast(jhandle) + ->strict_bytes_per_sync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setEventListeners + * Signature: (J[J)V + */ +void Java_org_rocksdb_DBOptions_setEventListeners(JNIEnv* env, jclass, + jlong jhandle, + jlongArray jlistener_array) { + auto* opt = reinterpret_cast(jhandle); + rocksdb_set_event_listeners_helper(env, jlistener_array, opt->listeners); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: eventListeners + * Signature: (J)[Lorg/rocksdb/AbstractEventListener; + */ +jobjectArray Java_org_rocksdb_DBOptions_eventListeners(JNIEnv* env, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return rocksdb_get_event_listeners_helper(env, opt->listeners); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDelayedWriteRate + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setDelayedWriteRate(JNIEnv*, jobject, + jlong jhandle, + jlong jdelayed_write_rate) { + auto* opt = reinterpret_cast(jhandle); + opt->delayed_write_rate = static_cast(jdelayed_write_rate); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: delayedWriteRate + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_delayedWriteRate(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->delayed_write_rate); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setEnablePipelinedWrite + * Signature: (JZ)V + */ +void 
Java_org_rocksdb_DBOptions_setEnablePipelinedWrite( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_pipelined_write) { + auto* opt = reinterpret_cast(jhandle); + opt->enable_pipelined_write = jenable_pipelined_write == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: enablePipelinedWrite + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_enablePipelinedWrite(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->enable_pipelined_write); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setUnorderedWrite + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setUnorderedWrite(JNIEnv*, jobject, + jlong jhandle, + jboolean junordered_write) { + auto* opt = reinterpret_cast(jhandle); + opt->unordered_write = junordered_write == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: unorderedWrite + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_unorderedWrite(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->unordered_write); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setEnableThreadTracking + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setEnableThreadTracking( + JNIEnv*, jobject, jlong jhandle, jboolean jenable_thread_tracking) { + auto* opt = reinterpret_cast(jhandle); + opt->enable_thread_tracking = jenable_thread_tracking == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: enableThreadTracking + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_enableThreadTracking(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->enable_thread_tracking); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAllowConcurrentMemtableWrite + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAllowConcurrentMemtableWrite( + JNIEnv*, jobject, jlong jhandle, jboolean allow) { + reinterpret_cast(jhandle) + ->allow_concurrent_memtable_write = static_cast(allow); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allowConcurrentMemtableWrite + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allowConcurrentMemtableWrite( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->allow_concurrent_memtable_write; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setEnableWriteThreadAdaptiveYield + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setEnableWriteThreadAdaptiveYield( + JNIEnv*, jobject, jlong jhandle, jboolean yield) { + reinterpret_cast(jhandle) + ->enable_write_thread_adaptive_yield = static_cast(yield); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: enableWriteThreadAdaptiveYield + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_enableWriteThreadAdaptiveYield( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->enable_write_thread_adaptive_yield; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWriteThreadMaxYieldUsec + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWriteThreadMaxYieldUsec(JNIEnv*, jobject, + jlong jhandle, + jlong max) { + reinterpret_cast(jhandle) + ->write_thread_max_yield_usec = static_cast(max); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: writeThreadMaxYieldUsec + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_writeThreadMaxYieldUsec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->write_thread_max_yield_usec; +} + +/* + * Class: org_rocksdb_DBOptions + * 
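+// The knobs above interact: allow_concurrent_memtable_write permits
+// parallel memtable inserts, and the adaptive-yield settings control how
+// long a write thread spins before blocking. A Java sketch (assumes the
+// corresponding RocksJava setters):
+//
+//   try (final DBOptions opts = new DBOptions()) {
+//     opts.setAllowConcurrentMemtableWrite(true)
+//         .setEnableWriteThreadAdaptiveYield(true)
+//         .setWriteThreadMaxYieldUsec(100);
+//   }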
Method: setWriteThreadSlowYieldUsec + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWriteThreadSlowYieldUsec(JNIEnv*, jobject, + jlong jhandle, + jlong slow) { + reinterpret_cast(jhandle) + ->write_thread_slow_yield_usec = static_cast(slow); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: writeThreadSlowYieldUsec + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_writeThreadSlowYieldUsec(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->write_thread_slow_yield_usec; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setSkipStatsUpdateOnDbOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setSkipStatsUpdateOnDbOpen( + JNIEnv*, jobject, jlong jhandle, jboolean jskip_stats_update_on_db_open) { + auto* opt = reinterpret_cast(jhandle); + opt->skip_stats_update_on_db_open = + static_cast(jskip_stats_update_on_db_open); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: skipStatsUpdateOnDbOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_skipStatsUpdateOnDbOpen(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->skip_stats_update_on_db_open); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setSkipCheckingSstFileSizesOnDbOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setSkipCheckingSstFileSizesOnDbOpen( + JNIEnv*, jclass, jlong jhandle, + jboolean jskip_checking_sst_file_sizes_on_db_open) { + auto* opt = reinterpret_cast(jhandle); + opt->skip_checking_sst_file_sizes_on_db_open = + static_cast(jskip_checking_sst_file_sizes_on_db_open); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: skipCheckingSstFileSizesOnDbOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_skipCheckingSstFileSizesOnDbOpen( + JNIEnv*, jclass, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->skip_checking_sst_file_sizes_on_db_open); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWalRecoveryMode + * Signature: (JB)V + */ +void Java_org_rocksdb_DBOptions_setWalRecoveryMode( + JNIEnv*, jobject, jlong jhandle, jbyte jwal_recovery_mode_value) { + auto* opt = reinterpret_cast(jhandle); + opt->wal_recovery_mode = + ROCKSDB_NAMESPACE::WALRecoveryModeJni::toCppWALRecoveryMode( + jwal_recovery_mode_value); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: walRecoveryMode + * Signature: (J)B + */ +jbyte Java_org_rocksdb_DBOptions_walRecoveryMode(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::WALRecoveryModeJni::toJavaWALRecoveryMode( + opt->wal_recovery_mode); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAllow2pc + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAllow2pc(JNIEnv*, jobject, jlong jhandle, + jboolean jallow_2pc) { + auto* opt = reinterpret_cast(jhandle); + opt->allow_2pc = static_cast(jallow_2pc); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allow2pc + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allow2pc(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_2pc); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setRowCache + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setRowCache(JNIEnv*, jobject, jlong jhandle, + jlong jrow_cache_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* row_cache = + reinterpret_cast*>( + jrow_cache_handle); + opt->row_cache = *row_cache; +} + +/* + * Class: org_rocksdb_DBOptions + * 
Method: setWalFilter + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setWalFilter(JNIEnv*, jobject, jlong jhandle, + jlong jwal_filter_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* wal_filter = reinterpret_cast( + jwal_filter_handle); + opt->wal_filter = wal_filter; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setFailIfOptionsFileError + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setFailIfOptionsFileError( + JNIEnv*, jobject, jlong jhandle, jboolean jfail_if_options_file_error) { + auto* opt = reinterpret_cast(jhandle); + opt->fail_if_options_file_error = + static_cast(jfail_if_options_file_error); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: failIfOptionsFileError + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_failIfOptionsFileError(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->fail_if_options_file_error); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDumpMallocStats + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setDumpMallocStats( + JNIEnv*, jobject, jlong jhandle, jboolean jdump_malloc_stats) { + auto* opt = reinterpret_cast(jhandle); + opt->dump_malloc_stats = static_cast(jdump_malloc_stats); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dumpMallocStats + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_dumpMallocStats(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->dump_malloc_stats); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAvoidFlushDuringRecovery + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAvoidFlushDuringRecovery( + JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_recovery) { + auto* opt = reinterpret_cast(jhandle); + opt->avoid_flush_during_recovery = + static_cast(javoid_flush_during_recovery); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: avoidFlushDuringRecovery + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringRecovery(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->avoid_flush_during_recovery); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAllowIngestBehind + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAllowIngestBehind( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_ingest_behind) { + auto* opt = reinterpret_cast(jhandle); + opt->allow_ingest_behind = jallow_ingest_behind == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allowIngestBehind + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allowIngestBehind(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->allow_ingest_behind); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setTwoWriteQueues + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setTwoWriteQueues(JNIEnv*, jobject, + jlong jhandle, + jboolean jtwo_write_queues) { + auto* opt = reinterpret_cast(jhandle); + opt->two_write_queues = jtwo_write_queues == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: twoWriteQueues + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_twoWriteQueues(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->two_write_queues); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setManualWalFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setManualWalFlush(JNIEnv*, 
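+// manual_wal_flush (bridged just below) buffers WAL writes until the
+// application flushes them explicitly. A Java sketch (assumes the
+// RocksJava DBOptions#setManualWalFlush and RocksDB#flushWal methods):
+//
+//   try (final DBOptions opts = new DBOptions().setManualWalFlush(true)) {
+//     // ... open a RocksDB `db` with opts, write some data ...
+//     // db.flushWal(true /* sync */);
+//   }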
jobject, + jlong jhandle, + jboolean jmanual_wal_flush) { + auto* opt = reinterpret_cast(jhandle); + opt->manual_wal_flush = jmanual_wal_flush == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: manualWalFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_manualWalFlush(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->manual_wal_flush); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAtomicFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAtomicFlush(JNIEnv*, jobject, jlong jhandle, + jboolean jatomic_flush) { + auto* opt = reinterpret_cast(jhandle); + opt->atomic_flush = jatomic_flush == JNI_TRUE; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: atomicFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_atomicFlush(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->atomic_flush); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAvoidFlushDuringShutdown + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAvoidFlushDuringShutdown( + JNIEnv*, jobject, jlong jhandle, jboolean javoid_flush_during_shutdown) { + auto* opt = reinterpret_cast(jhandle); + opt->avoid_flush_during_shutdown = + static_cast(javoid_flush_during_shutdown); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: avoidFlushDuringShutdown + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_avoidFlushDuringShutdown(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->avoid_flush_during_shutdown); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAvoidUnnecessaryBlockingIO + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAvoidUnnecessaryBlockingIO( + JNIEnv*, jclass, jlong jhandle, jboolean avoid_blocking_io) { + auto* opt = reinterpret_cast(jhandle); + opt->avoid_unnecessary_blocking_io = static_cast(avoid_blocking_io); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: avoidUnnecessaryBlockingIO + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_avoidUnnecessaryBlockingIO(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->avoid_unnecessary_blocking_io); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setPersistStatsToDisk + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setPersistStatsToDisk( + JNIEnv*, jclass, jlong jhandle, jboolean persist_stats_to_disk) { + auto* opt = reinterpret_cast(jhandle); + opt->persist_stats_to_disk = static_cast(persist_stats_to_disk); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: persistStatsToDisk + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_persistStatsToDisk(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->persist_stats_to_disk); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWriteDbidToManifest + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setWriteDbidToManifest( + JNIEnv*, jclass, jlong jhandle, jboolean jwrite_dbid_to_manifest) { + auto* opt = reinterpret_cast(jhandle); + opt->write_dbid_to_manifest = static_cast(jwrite_dbid_to_manifest); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: writeDbidToManifest + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_writeDbidToManifest(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->write_dbid_to_manifest); +} + +/* + * 
Class: org_rocksdb_DBOptions + * Method: setLogReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setLogReadaheadSize(JNIEnv*, jclass, + jlong jhandle, + jlong jlog_readahead_size) { + auto* opt = reinterpret_cast(jhandle); + opt->log_readahead_size = static_cast(jlog_readahead_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: logReasaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_logReadaheadSize(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->log_readahead_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setBestEffortsRecovery + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setBestEffortsRecovery( + JNIEnv*, jclass, jlong jhandle, jboolean jbest_efforts_recovery) { + auto* opt = reinterpret_cast(jhandle); + opt->best_efforts_recovery = static_cast(jbest_efforts_recovery); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: bestEffortsRecovery + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_bestEffortsRecovery(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->best_efforts_recovery); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxBgErrorResumeCount + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxBgErrorResumeCount( + JNIEnv*, jclass, jlong jhandle, jint jmax_bgerror_resume_count) { + auto* opt = reinterpret_cast(jhandle); + opt->max_bgerror_resume_count = static_cast(jmax_bgerror_resume_count); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxBgerrorResumeCount + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxBgerrorResumeCount(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_bgerror_resume_count); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setBgerrorResumeRetryInterval + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setBgerrorResumeRetryInterval( + JNIEnv*, jclass, jlong jhandle, jlong jbgerror_resume_retry_interval) { + auto* opt = reinterpret_cast(jhandle); + opt->bgerror_resume_retry_interval = + static_cast(jbgerror_resume_retry_interval); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: bgerrorResumeRetryInterval + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_bgerrorResumeRetryInterval(JNIEnv*, jclass, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->bgerror_resume_retry_interval); +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::WriteOptions + +/* + * Class: org_rocksdb_WriteOptions + * Method: newWriteOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_WriteOptions_newWriteOptions(JNIEnv*, jclass) { + auto* op = new ROCKSDB_NAMESPACE::WriteOptions(); + return GET_CPLUSPLUS_POINTER(op); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: copyWriteOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_WriteOptions_copyWriteOptions(JNIEnv*, jclass, + jlong jhandle) { + auto new_opt = new ROCKSDB_NAMESPACE::WriteOptions( + *(reinterpret_cast(jhandle))); + return GET_CPLUSPLUS_POINTER(new_opt); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: disposeInternal + * Signature: ()V + */ +void Java_org_rocksdb_WriteOptions_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* write_options = + reinterpret_cast(jhandle); + assert(write_options != nullptr); + delete write_options; +} + +/* + * Class: org_rocksdb_WriteOptions + * 
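+// newWriteOptions/copyWriteOptions/disposeInternal above implement the
+// usual RocksJava ownership pattern: the Java object holds the C++
+// pointer as a long and frees it from close(). A Java sketch (assumes
+// the standard WriteOptions API):
+//
+//   try (final WriteOptions wo = new WriteOptions()
+//            .setSync(true)            // fsync before acknowledging writes
+//            .setDisableWAL(false)) {
+//     // pass wo to RocksDB#put / RocksDB#write
+//   }  // close() reaches disposeInternal, which deletes the C++ object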
Method: setSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setSync(JNIEnv*, jobject, jlong jhandle, + jboolean jflag) { + reinterpret_cast(jhandle)->sync = jflag; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: sync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_sync(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->sync; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setDisableWAL + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setDisableWAL(JNIEnv*, jobject, + jlong jhandle, + jboolean jflag) { + reinterpret_cast(jhandle)->disableWAL = + jflag; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: disableWAL + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_disableWAL(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->disableWAL; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setIgnoreMissingColumnFamilies + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setIgnoreMissingColumnFamilies( + JNIEnv*, jobject, jlong jhandle, jboolean jignore_missing_column_families) { + reinterpret_cast(jhandle) + ->ignore_missing_column_families = + static_cast(jignore_missing_column_families); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: ignoreMissingColumnFamilies + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_ignoreMissingColumnFamilies( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->ignore_missing_column_families; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setNoSlowdown + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setNoSlowdown(JNIEnv*, jobject, + jlong jhandle, + jboolean jno_slowdown) { + reinterpret_cast(jhandle)->no_slowdown = + static_cast(jno_slowdown); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: noSlowdown + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_noSlowdown(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->no_slowdown; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setLowPri + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setLowPri(JNIEnv*, jobject, jlong jhandle, + jboolean jlow_pri) { + reinterpret_cast(jhandle)->low_pri = + static_cast(jlow_pri); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: lowPri + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_lowPri(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->low_pri; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: memtableInsertHintPerBatch + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_memtableInsertHintPerBatch( + JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle) + ->memtable_insert_hint_per_batch; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setMemtableInsertHintPerBatch + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setMemtableInsertHintPerBatch( + JNIEnv*, jobject, jlong jhandle, jboolean jmemtable_insert_hint_per_batch) { + reinterpret_cast(jhandle) + ->memtable_insert_hint_per_batch = + static_cast(jmemtable_insert_hint_per_batch); +} + +///////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::ReadOptions + +/* + * Class: org_rocksdb_ReadOptions + * Method: newReadOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_ReadOptions_newReadOptions__(JNIEnv*, jclass) { + auto* read_options = new ROCKSDB_NAMESPACE::ReadOptions(); + return 
GET_CPLUSPLUS_POINTER(read_options); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: newReadOptions + * Signature: (ZZ)J + */ +jlong Java_org_rocksdb_ReadOptions_newReadOptions__ZZ( + JNIEnv*, jclass, jboolean jverify_checksums, jboolean jfill_cache) { + auto* read_options = new ROCKSDB_NAMESPACE::ReadOptions( + static_cast(jverify_checksums), static_cast(jfill_cache)); + return GET_CPLUSPLUS_POINTER(read_options); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: copyReadOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_copyReadOptions(JNIEnv*, jclass, + jlong jhandle) { + auto new_opt = new ROCKSDB_NAMESPACE::ReadOptions( + *(reinterpret_cast(jhandle))); + return GET_CPLUSPLUS_POINTER(new_opt); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ReadOptions_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* read_options = + reinterpret_cast(jhandle); + assert(read_options != nullptr); + delete read_options; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setVerifyChecksums + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setVerifyChecksums( + JNIEnv*, jobject, jlong jhandle, jboolean jverify_checksums) { + reinterpret_cast(jhandle)->verify_checksums = + static_cast(jverify_checksums); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: verifyChecksums + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_verifyChecksums(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->verify_checksums; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setFillCache + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setFillCache(JNIEnv*, jobject, jlong jhandle, + jboolean jfill_cache) { + reinterpret_cast(jhandle)->fill_cache = + static_cast(jfill_cache); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: fillCache + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_fillCache(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle)->fill_cache; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setTailing + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setTailing(JNIEnv*, jobject, jlong jhandle, + jboolean jtailing) { + reinterpret_cast(jhandle)->tailing = + static_cast(jtailing); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: tailing + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_tailing(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->tailing; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: managed + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_managed(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->managed; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setManaged + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setManaged(JNIEnv*, jobject, jlong jhandle, + jboolean jmanaged) { + reinterpret_cast(jhandle)->managed = + static_cast(jmanaged); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: totalOrderSeek + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->total_order_seek; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setTotalOrderSeek + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setTotalOrderSeek( + JNIEnv*, jobject, jlong jhandle, jboolean jtotal_order_seek) { + reinterpret_cast(jhandle)->total_order_seek = + 
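+// A Java sketch of the basic read flags bridged above (assumes the
+// standard RocksJava ReadOptions API):
+//
+//   try (final ReadOptions ro = new ReadOptions()
+//            .setVerifyChecksums(true)   // checksum every block that is read
+//            .setFillCache(false)) {     // don't pollute the block cache
+//     // use ro for bulk scans that shouldn't disturb the cache
+//   }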
static_cast(jtotal_order_seek); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: prefixSameAsStart + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->prefix_same_as_start; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setPrefixSameAsStart + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setPrefixSameAsStart( + JNIEnv*, jobject, jlong jhandle, jboolean jprefix_same_as_start) { + reinterpret_cast(jhandle) + ->prefix_same_as_start = static_cast(jprefix_same_as_start); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: pinData + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_pinData(JNIEnv*, jobject, jlong jhandle) { + return reinterpret_cast(jhandle)->pin_data; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setPinData + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setPinData(JNIEnv*, jobject, jlong jhandle, + jboolean jpin_data) { + reinterpret_cast(jhandle)->pin_data = + static_cast(jpin_data); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: backgroundPurgeOnIteratorCleanup + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_backgroundPurgeOnIteratorCleanup( + JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->background_purge_on_iterator_cleanup); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setBackgroundPurgeOnIteratorCleanup + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setBackgroundPurgeOnIteratorCleanup( + JNIEnv*, jobject, jlong jhandle, + jboolean jbackground_purge_on_iterator_cleanup) { + auto* opt = reinterpret_cast(jhandle); + opt->background_purge_on_iterator_cleanup = + static_cast(jbackground_purge_on_iterator_cleanup); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: readaheadSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_readaheadSize(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->readahead_size); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setReadaheadSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setReadaheadSize(JNIEnv*, jobject, + jlong jhandle, + jlong jreadahead_size) { + auto* opt = reinterpret_cast(jhandle); + opt->readahead_size = static_cast(jreadahead_size); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: maxSkippableInternalKeys + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_maxSkippableInternalKeys(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->max_skippable_internal_keys); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setMaxSkippableInternalKeys + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setMaxSkippableInternalKeys( + JNIEnv*, jobject, jlong jhandle, jlong jmax_skippable_internal_keys) { + auto* opt = reinterpret_cast(jhandle); + opt->max_skippable_internal_keys = + static_cast(jmax_skippable_internal_keys); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: ignoreRangeDeletions + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_ignoreRangeDeletions(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->ignore_range_deletions); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setIgnoreRangeDeletions + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setIgnoreRangeDeletions( + JNIEnv*, jobject, 
jlong jhandle, jboolean jignore_range_deletions) { + auto* opt = reinterpret_cast(jhandle); + opt->ignore_range_deletions = static_cast(jignore_range_deletions); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setSnapshot + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setSnapshot(JNIEnv*, jobject, jlong jhandle, + jlong jsnapshot) { + reinterpret_cast(jhandle)->snapshot = + reinterpret_cast(jsnapshot); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: snapshot + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_snapshot(JNIEnv*, jobject, jlong jhandle) { + auto& snapshot = + reinterpret_cast(jhandle)->snapshot; + return GET_CPLUSPLUS_POINTER(snapshot); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: readTier + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ReadOptions_readTier(JNIEnv*, jobject, jlong jhandle) { + return static_cast( + reinterpret_cast(jhandle)->read_tier); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setReadTier + * Signature: (JB)V + */ +void Java_org_rocksdb_ReadOptions_setReadTier(JNIEnv*, jobject, jlong jhandle, + jbyte jread_tier) { + reinterpret_cast(jhandle)->read_tier = + static_cast(jread_tier); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setIterateUpperBound + * Signature: (JJ)I + */ +void Java_org_rocksdb_ReadOptions_setIterateUpperBound( + JNIEnv*, jobject, jlong jhandle, jlong jupper_bound_slice_handle) { + reinterpret_cast(jhandle) + ->iterate_upper_bound = + reinterpret_cast(jupper_bound_slice_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: iterateUpperBound + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_iterateUpperBound(JNIEnv*, jobject, + jlong jhandle) { + auto& upper_bound_slice_handle = + reinterpret_cast(jhandle) + ->iterate_upper_bound; + return GET_CPLUSPLUS_POINTER(upper_bound_slice_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setIterateLowerBound + * Signature: (JJ)I + */ +void Java_org_rocksdb_ReadOptions_setIterateLowerBound( + JNIEnv*, jobject, jlong jhandle, jlong jlower_bound_slice_handle) { + reinterpret_cast(jhandle) + ->iterate_lower_bound = + reinterpret_cast(jlower_bound_slice_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: iterateLowerBound + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_iterateLowerBound(JNIEnv*, jobject, + jlong jhandle) { + auto& lower_bound_slice_handle = + reinterpret_cast(jhandle) + ->iterate_lower_bound; + return GET_CPLUSPLUS_POINTER(lower_bound_slice_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setTableFilter + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setTableFilter( + JNIEnv*, jobject, jlong jhandle, jlong jjni_table_filter_handle) { + auto* opt = reinterpret_cast(jhandle); + auto* jni_table_filter = + reinterpret_cast( + jjni_table_filter_handle); + opt->table_filter = jni_table_filter->GetTableFilterFunction(); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: autoPrefixMode + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_autoPrefixMode(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->auto_prefix_mode); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setAutoPrefixMode + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setAutoPrefixMode( + JNIEnv*, jobject, jlong jhandle, jboolean jauto_prefix_mode) { + auto* opt = reinterpret_cast(jhandle); + opt->auto_prefix_mode = static_cast(jauto_prefix_mode); +} + +/* + * Class: 
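+// setSnapshot and the iterate-bound setters above store raw pointers in
+// the C++ ReadOptions, so the Java caller must keep the Snapshot/Slice
+// objects alive while the ReadOptions is in use. A Java sketch (assumes
+// the standard RocksJava API):
+//
+//   try (final Slice upper = new Slice("key9");
+//        final ReadOptions ro = new ReadOptions().setIterateUpperBound(upper)) {
+//     // an iterator built with ro stops before "key9"; `upper` must
+//     // outlive that iterator
+//   }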
org_rocksdb_ReadOptions + * Method: timestamp + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_timestamp(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + auto& timestamp_slice_handle = opt->timestamp; + return reinterpret_cast(timestamp_slice_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setTimestamp + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setTimestamp(JNIEnv*, jobject, jlong jhandle, + jlong jtimestamp_slice_handle) { + auto* opt = reinterpret_cast(jhandle); + opt->timestamp = + reinterpret_cast(jtimestamp_slice_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: iterStartTs + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_iterStartTs(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + auto& iter_start_ts_handle = opt->iter_start_ts; + return reinterpret_cast(iter_start_ts_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setIterStartTs + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setIterStartTs(JNIEnv*, jobject, + jlong jhandle, + jlong jiter_start_ts_handle) { + auto* opt = reinterpret_cast(jhandle); + opt->iter_start_ts = + reinterpret_cast(jiter_start_ts_handle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: deadline + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_deadline(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->deadline.count()); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setDeadline + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setDeadline(JNIEnv*, jobject, jlong jhandle, + jlong jdeadline) { + auto* opt = reinterpret_cast(jhandle); + opt->deadline = std::chrono::microseconds(static_cast(jdeadline)); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: ioTimeout + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_ioTimeout(JNIEnv*, jobject, jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->io_timeout.count()); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setIoTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setIoTimeout(JNIEnv*, jobject, jlong jhandle, + jlong jio_timeout) { + auto* opt = reinterpret_cast(jhandle); + opt->io_timeout = + std::chrono::microseconds(static_cast(jio_timeout)); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: valueSizeSofLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_valueSizeSoftLimit(JNIEnv*, jobject, + jlong jhandle) { + auto* opt = reinterpret_cast(jhandle); + return static_cast(opt->value_size_soft_limit); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setValueSizeSofLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setValueSizeSoftLimit( + JNIEnv*, jobject, jlong jhandle, jlong jvalue_size_soft_limit) { + auto* opt = reinterpret_cast(jhandle); + opt->value_size_soft_limit = static_cast(jvalue_size_soft_limit); +} + +///////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::ComparatorOptions + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: newComparatorOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(JNIEnv*, jclass) { + auto* comparator_opt = new ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions(); + return GET_CPLUSPLUS_POINTER(comparator_opt); +} + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: reusedSynchronisationType + * Signature: (J)B + */ 
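+// ComparatorOptions tunes the JNI callback machinery for Java-defined
+// comparators: direct byte buffers avoid copies, and bounded reused
+// buffers limit per-comparison allocation. A Java sketch (assumes the
+// standard RocksJava ComparatorOptions API):
+//
+//   try (final ComparatorOptions copt = new ComparatorOptions()
+//            .setUseDirectBuffer(true)
+//            .setMaxReusedBufferSize(64)) {
+//     // pass copt to an AbstractComparator subclass constructor
+//   }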
+jbyte Java_org_rocksdb_ComparatorOptions_reusedSynchronisationType( + JNIEnv*, jobject, jlong jhandle) { + auto* comparator_opt = + reinterpret_cast( + jhandle); + return ROCKSDB_NAMESPACE::ReusedSynchronisationTypeJni:: + toJavaReusedSynchronisationType( + comparator_opt->reused_synchronisation_type); +} + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: setReusedSynchronisationType + * Signature: (JB)V + */ +void Java_org_rocksdb_ComparatorOptions_setReusedSynchronisationType( + JNIEnv*, jobject, jlong jhandle, jbyte jreused_synhcronisation_type) { + auto* comparator_opt = + reinterpret_cast( + jhandle); + comparator_opt->reused_synchronisation_type = + ROCKSDB_NAMESPACE::ReusedSynchronisationTypeJni:: + toCppReusedSynchronisationType(jreused_synhcronisation_type); +} + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: useDirectBuffer + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ComparatorOptions_useDirectBuffer(JNIEnv*, jobject, + jlong jhandle) { + return static_cast( + reinterpret_cast( + jhandle) + ->direct_buffer); +} + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: setUseDirectBuffer + * Signature: (JZ)V + */ +void Java_org_rocksdb_ComparatorOptions_setUseDirectBuffer( + JNIEnv*, jobject, jlong jhandle, jboolean jdirect_buffer) { + reinterpret_cast(jhandle) + ->direct_buffer = jdirect_buffer == JNI_TRUE; +} + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: maxReusedBufferSize + * Signature: (J)I + */ +jint Java_org_rocksdb_ComparatorOptions_maxReusedBufferSize(JNIEnv*, jobject, + jlong jhandle) { + return static_cast( + reinterpret_cast( + jhandle) + ->max_reused_buffer_size); +} + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: setMaxReusedBufferSize + * Signature: (JI)V + */ +void Java_org_rocksdb_ComparatorOptions_setMaxReusedBufferSize( + JNIEnv*, jobject, jlong jhandle, jint jmax_reused_buffer_size) { + reinterpret_cast(jhandle) + ->max_reused_buffer_size = static_cast(jmax_reused_buffer_size); +} + +/* + * Class: org_rocksdb_ComparatorOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ComparatorOptions_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* comparator_opt = + reinterpret_cast( + jhandle); + assert(comparator_opt != nullptr); + delete comparator_opt; +} + +///////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::FlushOptions + +/* + * Class: org_rocksdb_FlushOptions + * Method: newFlushOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_FlushOptions_newFlushOptions(JNIEnv*, jclass) { + auto* flush_opt = new ROCKSDB_NAMESPACE::FlushOptions(); + return GET_CPLUSPLUS_POINTER(flush_opt); +} + +/* + * Class: org_rocksdb_FlushOptions + * Method: setWaitForFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_FlushOptions_setWaitForFlush(JNIEnv*, jobject, + jlong jhandle, + jboolean jwait) { + reinterpret_cast(jhandle)->wait = + static_cast(jwait); +} + +/* + * Class: org_rocksdb_FlushOptions + * Method: waitForFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_FlushOptions_waitForFlush(JNIEnv*, jobject, + jlong jhandle) { + return reinterpret_cast(jhandle)->wait; +} + +/* + * Class: org_rocksdb_FlushOptions + * Method: setAllowWriteStall + * Signature: (JZ)V + */ +void Java_org_rocksdb_FlushOptions_setAllowWriteStall( + JNIEnv*, jobject, jlong jhandle, jboolean jallow_write_stall) { + auto* flush_options = + reinterpret_cast(jhandle); + flush_options->allow_write_stall = jallow_write_stall == JNI_TRUE; +} + +/* + * Class: 
org_rocksdb_FlushOptions
+ * Method:    allowWriteStall
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_FlushOptions_allowWriteStall(JNIEnv*, jobject,
+                                                       jlong jhandle) {
+  auto* flush_options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jhandle);
+  return static_cast<jboolean>(flush_options->allow_write_stall);
+}
+
+/*
+ * Class:     org_rocksdb_FlushOptions
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_FlushOptions_disposeInternal(JNIEnv*, jobject,
+                                                   jlong jhandle) {
+  auto* flush_opt = reinterpret_cast<ROCKSDB_NAMESPACE::FlushOptions*>(jhandle);
+  assert(flush_opt != nullptr);
+  delete flush_opt;
+}
diff --git a/src/rocksdb/java/rocksjni/options_util.cc b/src/rocksdb/java/rocksjni/options_util.cc
new file mode 100644
index 000000000..1a5fb9bb5
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/options_util.cc
@@ -0,0 +1,195 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::OptionsUtil methods from Java side.
+
+#include "rocksdb/utilities/options_util.h"
+
+#include <jni.h>
+
+#include <string>
+
+#include "include/org_rocksdb_OptionsUtil.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksjni/portal.h"
+
+void build_column_family_descriptor_list(
+    JNIEnv* env, jobject jcfds,
+    std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor>& cf_descs) {
+  jmethodID add_mid = ROCKSDB_NAMESPACE::ListJni::getListAddMethodId(env);
+  if (add_mid == nullptr) {
+    // exception occurred accessing method
+    return;
+  }
+
+  // Column family descriptor
+  for (ROCKSDB_NAMESPACE::ColumnFamilyDescriptor& cfd : cf_descs) {
+    // Construct a ColumnFamilyDescriptor java object
+    jobject jcfd =
+        ROCKSDB_NAMESPACE::ColumnFamilyDescriptorJni::construct(env, &cfd);
+    if (env->ExceptionCheck()) {
+      // exception occurred constructing object
+      if (jcfd != nullptr) {
+        env->DeleteLocalRef(jcfd);
+      }
+      return;
+    }
+
+    // Add the object to java list.
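+    // (Each iteration creates a local reference for the descriptor; it is
+    // released explicitly on the error paths below, while references from
+    // successful adds live until this native call returns.)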
+ jboolean rs = env->CallBooleanMethod(jcfds, add_mid, jcfd); + if (env->ExceptionCheck() || rs == JNI_FALSE) { + // exception occurred calling method, or could not add + if (jcfd != nullptr) { + env->DeleteLocalRef(jcfd); + } + return; + } + } +} + +/* + * Class: org_rocksdb_OptionsUtil + * Method: loadLatestOptions + * Signature: (Ljava/lang/String;JLjava/util/List;Z)V + */ +void Java_org_rocksdb_OptionsUtil_loadLatestOptions__Ljava_lang_String_2JJLjava_util_List_2Z( + JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle, + jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) { + jboolean has_exception = JNI_FALSE; + auto db_path = + ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jdbpath, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + std::vector cf_descs; + ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::LoadLatestOptions( + db_path, reinterpret_cast(jenv_handle), + reinterpret_cast(jdb_opts_handle), + &cf_descs, ignore_unknown_options); + if (!s.ok()) { + // error, raise an exception + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } else { + build_column_family_descriptor_list(env, jcfds, cf_descs); + } +} + +/* + * Class: org_rocksdb_OptionsUtil + * Method: loadLatestOptions_1 + * Signature: (JLjava/lang/String;JLjava/util/List;)V + */ +void Java_org_rocksdb_OptionsUtil_loadLatestOptions__JLjava_lang_String_2JLjava_util_List_2( + JNIEnv* env, jclass /*jcls*/, jlong cfg_handle, jstring jdbpath, + jlong jdb_opts_handle, jobject jcfds) { + jboolean has_exception = JNI_FALSE; + auto db_path = + ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jdbpath, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + std::vector cf_descs; + auto* config_options = + reinterpret_cast(cfg_handle); + auto* db_options = + reinterpret_cast(jdb_opts_handle); + ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::LoadLatestOptions( + *config_options, db_path, db_options, &cf_descs); + if (!s.ok()) { + // error, raise an exception + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } else { + build_column_family_descriptor_list(env, jcfds, cf_descs); + } +} + +/* + * Class: org_rocksdb_OptionsUtil + * Method: loadOptionsFromFile + * Signature: (Ljava/lang/String;JJLjava/util/List;Z)V + */ +void Java_org_rocksdb_OptionsUtil_loadOptionsFromFile__Ljava_lang_String_2JJLjava_util_List_2Z( + JNIEnv* env, jclass /*jcls*/, jstring jopts_file_name, jlong jenv_handle, + jlong jdb_opts_handle, jobject jcfds, jboolean ignore_unknown_options) { + jboolean has_exception = JNI_FALSE; + auto opts_file_name = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, jopts_file_name, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + std::vector cf_descs; + ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::LoadOptionsFromFile( + opts_file_name, reinterpret_cast(jenv_handle), + reinterpret_cast(jdb_opts_handle), + &cf_descs, ignore_unknown_options); + if (!s.ok()) { + // error, raise an exception + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } else { + build_column_family_descriptor_list(env, jcfds, cf_descs); + } +} + +/* + * Class: org_rocksdb_OptionsUtil + * Method: loadOptionsFromFile + * Signature: (JLjava/lang/String;JLjava/util/List;)V + */ +void Java_org_rocksdb_OptionsUtil_loadOptionsFromFile__JLjava_lang_String_2JLjava_util_List_2( + JNIEnv* env, jclass /*jcls*/, jlong cfg_handle, jstring jopts_file_name, + jlong jdb_opts_handle, jobject 
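+// Typical Java-side use of these loaders (a sketch; assumes the RocksJava
+// OptionsUtil API that this file backs):
+//
+//   final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
+//   try (final DBOptions dbOpts = new DBOptions()) {
+//     OptionsUtil.loadLatestOptions("/path/to/db", Env.getDefault(), dbOpts,
+//         cfDescs, false /* ignoreUnknownOptions */);
+//     // cfDescs now holds one descriptor per persisted column family
+//   }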
jcfds) {
+  jboolean has_exception = JNI_FALSE;
+  auto opts_file_name = ROCKSDB_NAMESPACE::JniUtil::copyStdString(
+      env, jopts_file_name, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return;
+  }
+  std::vector<ROCKSDB_NAMESPACE::ColumnFamilyDescriptor> cf_descs;
+  auto* config_options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ConfigOptions*>(cfg_handle);
+  auto* db_options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdb_opts_handle);
+  ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::LoadOptionsFromFile(
+      *config_options, opts_file_name, db_options, &cf_descs);
+  if (!s.ok()) {
+    // error, raise an exception
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  } else {
+    build_column_family_descriptor_list(env, jcfds, cf_descs);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_OptionsUtil
+ * Method:    getLatestOptionsFileName
+ * Signature: (Ljava/lang/String;J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_OptionsUtil_getLatestOptionsFileName(
+    JNIEnv* env, jclass /*jcls*/, jstring jdbpath, jlong jenv_handle) {
+  jboolean has_exception = JNI_FALSE;
+  auto db_path =
+      ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jdbpath, &has_exception);
+  if (has_exception == JNI_TRUE) {
+    // exception occurred
+    return nullptr;
+  }
+  std::string options_file_name;
+  ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::GetLatestOptionsFileName(
+      db_path, reinterpret_cast<ROCKSDB_NAMESPACE::Env*>(jenv_handle),
+      &options_file_name);
+  if (!s.ok()) {
+    // error, raise an exception
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  } else {
+    return env->NewStringUTF(options_file_name.c_str());
+  }
+}
diff --git a/src/rocksdb/java/rocksjni/persistent_cache.cc b/src/rocksdb/java/rocksjni/persistent_cache.cc
new file mode 100644
index 000000000..295d91798
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/persistent_cache.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+//  This source code is licensed under both the GPLv2 (found in the
+//  COPYING file in the root directory) and Apache 2.0 License
+//  (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::PersistentCache.
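+//
+// Typical Java-side construction (a sketch; assumes the RocksJava
+// PersistentCache constructor that this file backs, and a previously
+// created org.rocksdb.Logger `logger`):
+//
+//   final PersistentCache pcache = new PersistentCache(Env.getDefault(),
+//       "/mnt/nvme/pcache", 1024L * 1024 * 1024, logger,
+//       true /* optimizedForNvm */);
+//   // attach it via BlockBasedTableConfig#setPersistentCache(pcache)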
+ +#include "rocksdb/persistent_cache.h" + +#include + +#include + +#include "include/org_rocksdb_PersistentCache.h" +#include "loggerjnicallback.h" +#include "portal.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_PersistentCache + * Method: newPersistentCache + * Signature: (JLjava/lang/String;JJZ)J + */ +jlong Java_org_rocksdb_PersistentCache_newPersistentCache( + JNIEnv* env, jclass, jlong jenv_handle, jstring jpath, jlong jsz, + jlong jlogger_handle, jboolean joptimized_for_nvm) { + auto* rocks_env = reinterpret_cast(jenv_handle); + jboolean has_exception = JNI_FALSE; + std::string path = + ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jpath, &has_exception); + if (has_exception == JNI_TRUE) { + return 0; + } + auto* logger = + reinterpret_cast*>( + jlogger_handle); + auto* cache = + new std::shared_ptr(nullptr); + ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::NewPersistentCache( + rocks_env, path, static_cast(jsz), *logger, + static_cast(joptimized_for_nvm), cache); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } + return GET_CPLUSPLUS_POINTER(cache); +} + +/* + * Class: org_rocksdb_PersistentCache + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_PersistentCache_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* cache = + reinterpret_cast*>( + jhandle); + delete cache; // delete std::shared_ptr +} diff --git a/src/rocksdb/java/rocksjni/portal.h b/src/rocksdb/java/rocksjni/portal.h new file mode 100644 index 000000000..340199507 --- /dev/null +++ b/src/rocksdb/java/rocksjni/portal.h @@ -0,0 +1,8745 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +// This file is designed for caching those frequently used IDs and provide +// efficient portal (i.e, a set of static functions) to access java code +// from c++. + +#ifndef JAVA_ROCKSJNI_PORTAL_H_ +#define JAVA_ROCKSJNI_PORTAL_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rocksdb/convenience.h" +#include "rocksdb/db.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/rate_limiter.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "rocksdb/utilities/backup_engine.h" +#include "rocksdb/utilities/memory_util.h" +#include "rocksdb/utilities/transaction_db.h" +#include "rocksdb/utilities/write_batch_with_index.h" +#include "rocksjni/compaction_filter_factory_jnicallback.h" +#include "rocksjni/comparatorjnicallback.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/event_listener_jnicallback.h" +#include "rocksjni/loggerjnicallback.h" +#include "rocksjni/table_filter_jnicallback.h" +#include "rocksjni/trace_writer_jnicallback.h" +#include "rocksjni/transaction_notifier_jnicallback.h" +#include "rocksjni/wal_filter_jnicallback.h" +#include "rocksjni/writebatchhandlerjnicallback.h" + +// Remove macro on windows +#ifdef DELETE +#undef DELETE +#endif + +namespace ROCKSDB_NAMESPACE { + +class JavaClass { + public: + /** + * Gets and initializes a Java Class + * + * @param env A pointer to the Java environment + * @param jclazz_name The fully qualified JNI name of the Java Class + * e.g. 
"java/lang/String" + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env, const char* jclazz_name) { + jclass jclazz = env->FindClass(jclazz_name); + assert(jclazz != nullptr); + return jclazz; + } +}; + +// Native class template +template +class RocksDBNativeClass : public JavaClass {}; + +// Native class template for sub-classes of RocksMutableObject +template +class NativeRocksMutableObject : public RocksDBNativeClass { + public: + /** + * Gets the Java Method ID for the + * RocksMutableObject#setNativeHandle(long, boolean) method + * + * @param env A pointer to the Java environment + * @return The Java Method ID or nullptr the RocksMutableObject class cannot + * be accessed, or if one of the NoSuchMethodError, + * ExceptionInInitializerError or OutOfMemoryError exceptions is thrown + */ + static jmethodID getSetNativeHandleMethod(JNIEnv* env) { + static jclass jclazz = DERIVED::getJClass(env); + if (jclazz == nullptr) { + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "setNativeHandle", "(JZ)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Sets the C++ object pointer handle in the Java object + * + * @param env A pointer to the Java environment + * @param jobj The Java object on which to set the pointer handle + * @param ptr The C++ object pointer + * @param java_owns_handle JNI_TRUE if ownership of the C++ object is + * managed by the Java object + * + * @return true if a Java exception is pending, false otherwise + */ + static bool setHandle(JNIEnv* env, jobject jobj, PTR ptr, + jboolean java_owns_handle) { + assert(jobj != nullptr); + static jmethodID mid = getSetNativeHandleMethod(env); + if (mid == nullptr) { + return true; // signal exception + } + + env->CallVoidMethod(jobj, mid, GET_CPLUSPLUS_POINTER(ptr), + java_owns_handle); + if (env->ExceptionCheck()) { + return true; // signal exception + } + + return false; + } +}; + +// Java Exception template +template +class JavaException : public JavaClass { + public: + /** + * Create and throw a java exception with the provided message + * + * @param env A pointer to the Java environment + * @param msg The message for the exception + * + * @return true if an exception was thrown, false otherwise + */ + static bool ThrowNew(JNIEnv* env, const std::string& msg) { + jclass jclazz = DERIVED::getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + std::cerr << "JavaException::ThrowNew - Error: unexpected exception!" + << std::endl; + return env->ExceptionCheck(); + } + + const jint rs = env->ThrowNew(jclazz, msg.c_str()); + if (rs != JNI_OK) { + // exception could not be thrown + std::cerr << "JavaException::ThrowNew - Fatal: could not throw exception!" 
+ << std::endl; + return env->ExceptionCheck(); + } + + return true; + } +}; + +// The portal class for java.lang.IllegalArgumentException +class IllegalArgumentExceptionJni + : public JavaException { + public: + /** + * Get the Java Class java.lang.IllegalArgumentException + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaException::getJClass(env, "java/lang/IllegalArgumentException"); + } + + /** + * Create and throw a Java IllegalArgumentException with the provided status + * + * If s.ok() == true, then this function will not throw any exception. + * + * @param env A pointer to the Java environment + * @param s The status for the exception + * + * @return true if an exception was thrown, false otherwise + */ + static bool ThrowNew(JNIEnv* env, const Status& s) { + assert(!s.ok()); + if (s.ok()) { + return false; + } + + // get the IllegalArgumentException class + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + std::cerr << "IllegalArgumentExceptionJni::ThrowNew/class - Error: " + "unexpected exception!" + << std::endl; + return env->ExceptionCheck(); + } + + return JavaException::ThrowNew(env, s.ToString()); + } +}; + +// The portal class for org.rocksdb.Status.Code +class CodeJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.Status.Code + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/Status$Code"); + } + + /** + * Get the Java Method: Status.Code#getValue + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getValueMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "getValue", "()b"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.Status.SubCode +class SubCodeJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.Status.SubCode + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/Status$SubCode"); + } + + /** + * Get the Java Method: Status.SubCode#getValue + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getValueMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "getValue", "()b"); + assert(mid != nullptr); + return mid; + } + + static ROCKSDB_NAMESPACE::Status::SubCode toCppSubCode( + 
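+// These byte values mirror the constants used by org.rocksdb.Status; on
+// the Java side a native error surfaces as a RocksDBException carrying
+// the mapped Status. A sketch (assumes the standard RocksJava API):
+//
+//   try {
+//     db.put(writeOpts, key, value);
+//   } catch (final RocksDBException e) {
+//     final Status status = e.getStatus();
+//     if (status != null && status.getCode() == Status.Code.Busy) {
+//       // back off and retry
+//     }
+//   }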
const jbyte jsub_code) { + switch (jsub_code) { + case 0x0: + return ROCKSDB_NAMESPACE::Status::SubCode::kNone; + case 0x1: + return ROCKSDB_NAMESPACE::Status::SubCode::kMutexTimeout; + case 0x2: + return ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout; + case 0x3: + return ROCKSDB_NAMESPACE::Status::SubCode::kLockLimit; + case 0x4: + return ROCKSDB_NAMESPACE::Status::SubCode::kNoSpace; + case 0x5: + return ROCKSDB_NAMESPACE::Status::SubCode::kDeadlock; + case 0x6: + return ROCKSDB_NAMESPACE::Status::SubCode::kStaleFile; + case 0x7: + return ROCKSDB_NAMESPACE::Status::SubCode::kMemoryLimit; + + case 0x7F: + default: + return ROCKSDB_NAMESPACE::Status::SubCode::kNone; + } + } +}; + +// The portal class for org.rocksdb.Status +class StatusJni + : public RocksDBNativeClass { + public: + /** + * Get the Java Class org.rocksdb.Status + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/Status"); + } + + /** + * Get the Java Method: Status#getCode + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getCodeMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "getCode", "()Lorg/rocksdb/Status$Code;"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: Status#getSubCode + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getSubCodeMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "getSubCode", + "()Lorg/rocksdb/Status$SubCode;"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: Status#getState + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getStateMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "getState", "()Ljava/lang/String;"); + assert(mid != nullptr); + return mid; + } + + /** + * Create a new Java org.rocksdb.Status object with the same properties as + * the provided C++ ROCKSDB_NAMESPACE::Status object + * + * @param env A pointer to the Java environment + * @param status The ROCKSDB_NAMESPACE::Status object + * + * @return A reference to a Java org.rocksdb.Status object, or nullptr + * if an an exception occurs + */ + static jobject construct(JNIEnv* env, const Status& status) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = + env->GetMethodID(jclazz, "", "(BBLjava/lang/String;)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + // convert the Status state for Java + jstring 
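// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] Every portal class in
// this header follows the same shape as StatusJni above: a static getJClass()
// that resolves the Java class by its JNI name, plus one cached-jmethodID
// getter per Java method. A minimal sketch for a *hypothetical* Java class
// org.rocksdb.Example (assumed to sit inside ROCKSDB_NAMESPACE so that the
// JavaClass base from earlier in this header is in scope):
class ExampleJni : public JavaClass {
 public:
  static jclass getJClass(JNIEnv* env) {
    return JavaClass::getJClass(env, "org/rocksdb/Example");
  }

  // Caches the method ID for Example#name() on first use; the function-local
  // static makes the lookup happen once per process, as in the getters above.
  static jmethodID getNameMethod(JNIEnv* env) {
    jclass jclazz = getJClass(env);
    if (jclazz == nullptr) {
      return nullptr;  // exception pending from FindClass
    }
    static jmethodID mid =
        env->GetMethodID(jclazz, "name", "()Ljava/lang/String;");
    assert(mid != nullptr);
    return mid;
  }
};
// ---------------------------------------------------------------------------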
jstate = nullptr; + if (status.getState() != nullptr) { + const char* const state = status.getState(); + jstate = env->NewStringUTF(state); + if (env->ExceptionCheck()) { + if (jstate != nullptr) { + env->DeleteLocalRef(jstate); + } + return nullptr; + } + } + + jobject jstatus = + env->NewObject(jclazz, mid, toJavaStatusCode(status.code()), + toJavaStatusSubCode(status.subcode()), jstate); + if (env->ExceptionCheck()) { + // exception occurred + if (jstate != nullptr) { + env->DeleteLocalRef(jstate); + } + return nullptr; + } + + if (jstate != nullptr) { + env->DeleteLocalRef(jstate); + } + + return jstatus; + } + + static jobject construct(JNIEnv* env, const Status* status) { + return construct(env, *status); + } + + // Returns the equivalent org.rocksdb.Status.Code for the provided + // C++ ROCKSDB_NAMESPACE::Status::Code enum + static jbyte toJavaStatusCode(const ROCKSDB_NAMESPACE::Status::Code& code) { + switch (code) { + case ROCKSDB_NAMESPACE::Status::Code::kOk: + return 0x0; + case ROCKSDB_NAMESPACE::Status::Code::kNotFound: + return 0x1; + case ROCKSDB_NAMESPACE::Status::Code::kCorruption: + return 0x2; + case ROCKSDB_NAMESPACE::Status::Code::kNotSupported: + return 0x3; + case ROCKSDB_NAMESPACE::Status::Code::kInvalidArgument: + return 0x4; + case ROCKSDB_NAMESPACE::Status::Code::kIOError: + return 0x5; + case ROCKSDB_NAMESPACE::Status::Code::kMergeInProgress: + return 0x6; + case ROCKSDB_NAMESPACE::Status::Code::kIncomplete: + return 0x7; + case ROCKSDB_NAMESPACE::Status::Code::kShutdownInProgress: + return 0x8; + case ROCKSDB_NAMESPACE::Status::Code::kTimedOut: + return 0x9; + case ROCKSDB_NAMESPACE::Status::Code::kAborted: + return 0xA; + case ROCKSDB_NAMESPACE::Status::Code::kBusy: + return 0xB; + case ROCKSDB_NAMESPACE::Status::Code::kExpired: + return 0xC; + case ROCKSDB_NAMESPACE::Status::Code::kTryAgain: + return 0xD; + case ROCKSDB_NAMESPACE::Status::Code::kColumnFamilyDropped: + return 0xE; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent org.rocksdb.Status.SubCode for the provided + // C++ ROCKSDB_NAMESPACE::Status::SubCode enum + static jbyte toJavaStatusSubCode( + const ROCKSDB_NAMESPACE::Status::SubCode& subCode) { + switch (subCode) { + case ROCKSDB_NAMESPACE::Status::SubCode::kNone: + return 0x0; + case ROCKSDB_NAMESPACE::Status::SubCode::kMutexTimeout: + return 0x1; + case ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout: + return 0x2; + case ROCKSDB_NAMESPACE::Status::SubCode::kLockLimit: + return 0x3; + case ROCKSDB_NAMESPACE::Status::SubCode::kNoSpace: + return 0x4; + case ROCKSDB_NAMESPACE::Status::SubCode::kDeadlock: + return 0x5; + case ROCKSDB_NAMESPACE::Status::SubCode::kStaleFile: + return 0x6; + case ROCKSDB_NAMESPACE::Status::SubCode::kMemoryLimit: + return 0x7; + default: + return 0x7F; // undefined + } + } + + static std::unique_ptr toCppStatus( + const jbyte jcode_value, const jbyte jsub_code_value) { + std::unique_ptr status; + switch (jcode_value) { + case 0x0: + // Ok + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::OK())); + break; + case 0x1: + // NotFound + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::NotFound( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x2: + // Corruption + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Corruption( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x3: + // NotSupported + status = 
std::unique_ptr( + new ROCKSDB_NAMESPACE::Status( + ROCKSDB_NAMESPACE::Status::NotSupported( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode( + jsub_code_value)))); + break; + case 0x4: + // InvalidArgument + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status( + ROCKSDB_NAMESPACE::Status::InvalidArgument( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode( + jsub_code_value)))); + break; + case 0x5: + // IOError + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::IOError( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x6: + // MergeInProgress + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status( + ROCKSDB_NAMESPACE::Status::MergeInProgress( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode( + jsub_code_value)))); + break; + case 0x7: + // Incomplete + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Incomplete( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0x8: + // ShutdownInProgress + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status( + ROCKSDB_NAMESPACE::Status::ShutdownInProgress( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode( + jsub_code_value)))); + break; + case 0x9: + // TimedOut + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::TimedOut( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xA: + // Aborted + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Aborted( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xB: + // Busy + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Busy( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xC: + // Expired + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::Expired( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xD: + // TryAgain + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::TryAgain( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode(jsub_code_value)))); + break; + case 0xE: + // ColumnFamilyDropped + status = std::unique_ptr( + new ROCKSDB_NAMESPACE::Status( + ROCKSDB_NAMESPACE::Status::ColumnFamilyDropped( + ROCKSDB_NAMESPACE::SubCodeJni::toCppSubCode( + jsub_code_value)))); + break; + case 0x7F: + default: + return nullptr; + } + return status; + } + + // Returns the equivalent ROCKSDB_NAMESPACE::Status for the Java + // org.rocksdb.Status + static std::unique_ptr toCppStatus( + JNIEnv* env, const jobject jstatus) { + jmethodID mid_code = getCodeMethod(env); + if (mid_code == nullptr) { + // exception occurred + return nullptr; + } + jobject jcode = env->CallObjectMethod(jstatus, mid_code); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + jmethodID mid_code_value = ROCKSDB_NAMESPACE::CodeJni::getValueMethod(env); + if (mid_code_value == nullptr) { + // exception occurred + return nullptr; + } + jbyte jcode_value = env->CallByteMethod(jcode, mid_code_value); + if (env->ExceptionCheck()) { + // exception occurred + if (jcode != nullptr) { + env->DeleteLocalRef(jcode); + } + return nullptr; + } + + jmethodID mid_subCode = getSubCodeMethod(env); + if (mid_subCode == nullptr) { + // exception occurred + return nullptr; + } + jobject jsubCode = env->CallObjectMethod(jstatus, mid_subCode); + if (env->ExceptionCheck()) { + // exception occurred + 
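// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] toJavaStatusCode /
// toJavaStatusSubCode and the toCppStatus(jbyte, jbyte) switch above are
// intended to be inverses of each other. A minimal round-trip check,
// assuming the portal.h declarations are in scope:
inline void status_round_trip_sketch() {
  const ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::Status::NoSpace();
  const jbyte jcode =
      ROCKSDB_NAMESPACE::StatusJni::toJavaStatusCode(s.code());
  const jbyte jsub =
      ROCKSDB_NAMESPACE::StatusJni::toJavaStatusSubCode(s.subcode());
  const std::unique_ptr<ROCKSDB_NAMESPACE::Status> s2 =
      ROCKSDB_NAMESPACE::StatusJni::toCppStatus(jcode, jsub);
  assert(s2 != nullptr && s2->code() == s.code() &&
         s2->subcode() == s.subcode());
}
// ---------------------------------------------------------------------------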
if (jcode != nullptr) { + env->DeleteLocalRef(jcode); + } + return nullptr; + } + + jbyte jsub_code_value = 0x0; // None + if (jsubCode != nullptr) { + jmethodID mid_subCode_value = + ROCKSDB_NAMESPACE::SubCodeJni::getValueMethod(env); + if (mid_subCode_value == nullptr) { + // exception occurred + return nullptr; + } + jsub_code_value = env->CallByteMethod(jsubCode, mid_subCode_value); + if (env->ExceptionCheck()) { + // exception occurred + if (jcode != nullptr) { + env->DeleteLocalRef(jcode); + } + return nullptr; + } + } + + jmethodID mid_state = getStateMethod(env); + if (mid_state == nullptr) { + // exception occurred + return nullptr; + } + jobject jstate = env->CallObjectMethod(jstatus, mid_state); + if (env->ExceptionCheck()) { + // exception occurred + if (jsubCode != nullptr) { + env->DeleteLocalRef(jsubCode); + } + if (jcode != nullptr) { + env->DeleteLocalRef(jcode); + } + return nullptr; + } + + std::unique_ptr status = + toCppStatus(jcode_value, jsub_code_value); + + // delete all local refs + if (jstate != nullptr) { + env->DeleteLocalRef(jstate); + } + if (jsubCode != nullptr) { + env->DeleteLocalRef(jsubCode); + } + if (jcode != nullptr) { + env->DeleteLocalRef(jcode); + } + + return status; + } +}; + +// The portal class for org.rocksdb.RocksDBException +class RocksDBExceptionJni : public JavaException { + public: + /** + * Get the Java Class org.rocksdb.RocksDBException + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaException::getJClass(env, "org/rocksdb/RocksDBException"); + } + + /** + * Create and throw a Java RocksDBException with the provided message + * + * @param env A pointer to the Java environment + * @param msg The message for the exception + * + * @return true if an exception was thrown, false otherwise + */ + static bool ThrowNew(JNIEnv* env, const std::string& msg) { + return JavaException::ThrowNew(env, msg); + } + + /** + * Create and throw a Java RocksDBException with the provided status + * + * If s->ok() == true, then this function will not throw any exception. + * + * @param env A pointer to the Java environment + * @param s The status for the exception + * + * @return true if an exception was thrown, false otherwise + */ + static bool ThrowNew(JNIEnv* env, std::unique_ptr& s) { + return ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, *(s.get())); + } + + /** + * Create and throw a Java RocksDBException with the provided status + * + * If s.ok() == true, then this function will not throw any exception. + * + * @param env A pointer to the Java environment + * @param s The status for the exception + * + * @return true if an exception was thrown, false otherwise + */ + static bool ThrowNew(JNIEnv* env, const Status& s) { + if (s.ok()) { + return false; + } + + // get the RocksDBException class + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + std::cerr << "RocksDBExceptionJni::ThrowNew/class - Error: unexpected " + "exception!" 
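// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] Note the discipline in
// toCppStatus(JNIEnv*, jobject) above: every jobject obtained from a JNI call
// is a local reference and is released with DeleteLocalRef on *every* exit
// path. An RAII wrapper like this hypothetical one makes the same invariant
// automatic; portal.h itself releases the references manually, as shown:
class LocalRefSketch {
 public:
  LocalRefSketch(JNIEnv* env, jobject obj) : env_(env), obj_(obj) {}
  ~LocalRefSketch() {
    if (obj_ != nullptr) {
      env_->DeleteLocalRef(obj_);  // released on scope exit, on any path
    }
  }
  jobject get() const { return obj_; }

 private:
  JNIEnv* env_;
  jobject obj_;
};
// ---------------------------------------------------------------------------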
+ << std::endl; + return env->ExceptionCheck(); + } + + // get the constructor of org.rocksdb.RocksDBException + jmethodID mid = + env->GetMethodID(jclazz, "", "(Lorg/rocksdb/Status;)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + std::cerr + << "RocksDBExceptionJni::ThrowNew/cstr - Error: unexpected exception!" + << std::endl; + return env->ExceptionCheck(); + } + + // get the Java status object + jobject jstatus = StatusJni::construct(env, s); + if (jstatus == nullptr) { + // exception occcurred + std::cerr << "RocksDBExceptionJni::ThrowNew/StatusJni - Error: " + "unexpected exception!" + << std::endl; + return env->ExceptionCheck(); + } + + // construct the RocksDBException + jthrowable rocksdb_exception = + reinterpret_cast(env->NewObject(jclazz, mid, jstatus)); + if (env->ExceptionCheck()) { + if (jstatus != nullptr) { + env->DeleteLocalRef(jstatus); + } + if (rocksdb_exception != nullptr) { + env->DeleteLocalRef(rocksdb_exception); + } + std::cerr << "RocksDBExceptionJni::ThrowNew/NewObject - Error: " + "unexpected exception!" + << std::endl; + return true; + } + + // throw the RocksDBException + const jint rs = env->Throw(rocksdb_exception); + if (rs != JNI_OK) { + // exception could not be thrown + std::cerr + << "RocksDBExceptionJni::ThrowNew - Fatal: could not throw exception!" + << std::endl; + if (jstatus != nullptr) { + env->DeleteLocalRef(jstatus); + } + if (rocksdb_exception != nullptr) { + env->DeleteLocalRef(rocksdb_exception); + } + return env->ExceptionCheck(); + } + + if (jstatus != nullptr) { + env->DeleteLocalRef(jstatus); + } + if (rocksdb_exception != nullptr) { + env->DeleteLocalRef(rocksdb_exception); + } + + return true; + } + + /** + * Create and throw a Java RocksDBException with the provided message + * and status + * + * If s.ok() == true, then this function will not throw any exception. + * + * @param env A pointer to the Java environment + * @param msg The message for the exception + * @param s The status for the exception + * + * @return true if an exception was thrown, false otherwise + */ + static bool ThrowNew(JNIEnv* env, const std::string& msg, const Status& s) { + assert(!s.ok()); + if (s.ok()) { + return false; + } + + // get the RocksDBException class + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + std::cerr << "RocksDBExceptionJni::ThrowNew/class - Error: unexpected " + "exception!" + << std::endl; + return env->ExceptionCheck(); + } + + // get the constructor of org.rocksdb.RocksDBException + jmethodID mid = env->GetMethodID( + jclazz, "", "(Ljava/lang/String;Lorg/rocksdb/Status;)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + std::cerr + << "RocksDBExceptionJni::ThrowNew/cstr - Error: unexpected exception!" + << std::endl; + return env->ExceptionCheck(); + } + + jstring jmsg = env->NewStringUTF(msg.c_str()); + if (jmsg == nullptr) { + // exception thrown: OutOfMemoryError + std::cerr + << "RocksDBExceptionJni::ThrowNew/msg - Error: unexpected exception!" + << std::endl; + return env->ExceptionCheck(); + } + + // get the Java status object + jobject jstatus = StatusJni::construct(env, s); + if (jstatus == nullptr) { + // exception occcurred + std::cerr << "RocksDBExceptionJni::ThrowNew/StatusJni - Error: " + "unexpected exception!" 
+ << std::endl; + if (jmsg != nullptr) { + env->DeleteLocalRef(jmsg); + } + return env->ExceptionCheck(); + } + + // construct the RocksDBException + jthrowable rocksdb_exception = reinterpret_cast( + env->NewObject(jclazz, mid, jmsg, jstatus)); + if (env->ExceptionCheck()) { + if (jstatus != nullptr) { + env->DeleteLocalRef(jstatus); + } + if (jmsg != nullptr) { + env->DeleteLocalRef(jmsg); + } + if (rocksdb_exception != nullptr) { + env->DeleteLocalRef(rocksdb_exception); + } + std::cerr << "RocksDBExceptionJni::ThrowNew/NewObject - Error: " + "unexpected exception!" + << std::endl; + return true; + } + + // throw the RocksDBException + const jint rs = env->Throw(rocksdb_exception); + if (rs != JNI_OK) { + // exception could not be thrown + std::cerr + << "RocksDBExceptionJni::ThrowNew - Fatal: could not throw exception!" + << std::endl; + if (jstatus != nullptr) { + env->DeleteLocalRef(jstatus); + } + if (jmsg != nullptr) { + env->DeleteLocalRef(jmsg); + } + if (rocksdb_exception != nullptr) { + env->DeleteLocalRef(rocksdb_exception); + } + return env->ExceptionCheck(); + } + + if (jstatus != nullptr) { + env->DeleteLocalRef(jstatus); + } + if (jmsg != nullptr) { + env->DeleteLocalRef(jmsg); + } + if (rocksdb_exception != nullptr) { + env->DeleteLocalRef(rocksdb_exception); + } + + return true; + } + + /** + * Get the Java Method: RocksDBException#getStatus + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getStatusMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "getStatus", "()Lorg/rocksdb/Status;"); + assert(mid != nullptr); + return mid; + } + + static std::unique_ptr toCppStatus( + JNIEnv* env, jthrowable jrocksdb_exception) { + if (!env->IsInstanceOf(jrocksdb_exception, getJClass(env))) { + // not an instance of RocksDBException + return nullptr; + } + + // get the java status object + jmethodID mid = getStatusMethod(env); + if (mid == nullptr) { + // exception occurred accessing class or method + return nullptr; + } + + jobject jstatus = env->CallObjectMethod(jrocksdb_exception, mid); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + if (jstatus == nullptr) { + return nullptr; // no status available + } + + return ROCKSDB_NAMESPACE::StatusJni::toCppStatus(env, jstatus); + } +}; + +// The portal class for java.util.List +class ListJni : public JavaClass { + public: + /** + * Get the Java Class java.util.List + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getListClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/util/List"); + } + + /** + * Get the Java Class java.util.ArrayList + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getArrayListClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/util/ArrayList"); + } + + /** + * Get the Java Class java.util.Iterator + * + * @param env A pointer to the Java environment + * + * 
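// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] Typical use of
// RocksDBExceptionJni from a JNI entry point: run the native call, and if
// the Status is not ok, raise org.rocksdb.RocksDBException before returning.
// The function name and handle below are hypothetical; rocksdb/db.h is
// assumed to be included:
inline void example_native_flush(JNIEnv* env, jlong jdb_handle) {
  auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
  const ROCKSDB_NAMESPACE::Status s =
      db->Flush(ROCKSDB_NAMESPACE::FlushOptions());
  if (!s.ok()) {
    // raises org.rocksdb.RocksDBException on the Java side
    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
  }
}
// ---------------------------------------------------------------------------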
@return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getIteratorClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/util/Iterator"); + } + + /** + * Get the Java Method: List#iterator + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getIteratorMethod(JNIEnv* env) { + jclass jlist_clazz = getListClass(env); + if (jlist_clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jlist_clazz, "iterator", "()Ljava/util/Iterator;"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: Iterator#hasNext + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getHasNextMethod(JNIEnv* env) { + jclass jiterator_clazz = getIteratorClass(env); + if (jiterator_clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jiterator_clazz, "hasNext", "()Z"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: Iterator#next + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getNextMethod(JNIEnv* env) { + jclass jiterator_clazz = getIteratorClass(env); + if (jiterator_clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jiterator_clazz, "next", "()Ljava/lang/Object;"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: ArrayList constructor + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getArrayListConstructorMethodId(JNIEnv* env) { + jclass jarray_list_clazz = getArrayListClass(env); + if (jarray_list_clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + static jmethodID mid = + env->GetMethodID(jarray_list_clazz, "", "(I)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: List#add + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getListAddMethodId(JNIEnv* env) { + jclass jlist_clazz = getListClass(env); + if (jlist_clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jlist_clazz, "add", "(Ljava/lang/Object;)Z"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for java.lang.Byte +class ByteJni : public JavaClass { + public: + /** + * Get the Java Class java.lang.Byte + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/lang/Byte"); + } + + /** + * Get the Java Class byte[] + * + * @param env A pointer to the Java 
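// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] Walking a
// java.util.List from native code with the ListJni method IDs above
// (hypothetical helper; the per-call ExceptionCheck after CallBooleanMethod
// is abbreviated for brevity):
template <typename Visitor>
inline void for_each_list_element(JNIEnv* env, jobject jlist, Visitor visit) {
  jmethodID mid_iterator = ROCKSDB_NAMESPACE::ListJni::getIteratorMethod(env);
  jmethodID mid_has_next = ROCKSDB_NAMESPACE::ListJni::getHasNextMethod(env);
  jmethodID mid_next = ROCKSDB_NAMESPACE::ListJni::getNextMethod(env);
  if (mid_iterator == nullptr || mid_has_next == nullptr ||
      mid_next == nullptr) {
    return;  // exception pending
  }
  jobject jit = env->CallObjectMethod(jlist, mid_iterator);
  if (env->ExceptionCheck()) {
    return;
  }
  while (env->CallBooleanMethod(jit, mid_has_next) == JNI_TRUE) {
    jobject jelem = env->CallObjectMethod(jit, mid_next);
    visit(env, jelem);
    env->DeleteLocalRef(jelem);  // release each element's local reference
  }
  env->DeleteLocalRef(jit);
}
// ---------------------------------------------------------------------------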
environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getArrayJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "[B"); + } + + /** + * Creates a new 2-dimensional Java Byte Array byte[][] + * + * @param env A pointer to the Java environment + * @param len The size of the first dimension + * + * @return A reference to the Java byte[][] or nullptr if an exception occurs + */ + static jobjectArray new2dByteArray(JNIEnv* env, const jsize len) { + jclass clazz = getArrayJClass(env); + if (clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + return env->NewObjectArray(len, clazz, nullptr); + } + + /** + * Get the Java Method: Byte#byteValue + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getByteValueMethod(JNIEnv* env) { + jclass clazz = getJClass(env); + if (clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(clazz, "byteValue", "()B"); + assert(mid != nullptr); + return mid; + } + + /** + * Calls the Java Method: Byte#valueOf, returning a constructed Byte jobject + * + * @param env A pointer to the Java environment + * + * @return A constructing Byte object or nullptr if the class or method id + * could not be retrieved, or an exception occurred + */ + static jobject valueOf(JNIEnv* env, jbyte jprimitive_byte) { + jclass clazz = getJClass(env); + if (clazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetStaticMethodID(clazz, "valueOf", "(B)Ljava/lang/Byte;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + const jobject jbyte_obj = + env->CallStaticObjectMethod(clazz, mid, jprimitive_byte); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + return jbyte_obj; + } +}; + +// The portal class for java.nio.ByteBuffer +class ByteBufferJni : public JavaClass { + public: + /** + * Get the Java Class java.nio.ByteBuffer + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/nio/ByteBuffer"); + } + + /** + * Get the Java Method: ByteBuffer#allocate + * + * @param env A pointer to the Java environment + * @param jbytebuffer_clazz if you have a reference to a ByteBuffer class, or + * nullptr + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getAllocateMethodId(JNIEnv* env, + jclass jbytebuffer_clazz = nullptr) { + const jclass jclazz = + jbytebuffer_clazz == nullptr ? 
getJClass(env) : jbytebuffer_clazz; + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetStaticMethodID(jclazz, "allocate", "(I)Ljava/nio/ByteBuffer;"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: ByteBuffer#array + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getArrayMethodId(JNIEnv* env, + jclass jbytebuffer_clazz = nullptr) { + const jclass jclazz = + jbytebuffer_clazz == nullptr ? getJClass(env) : jbytebuffer_clazz; + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "array", "()[B"); + assert(mid != nullptr); + return mid; + } + + static jobject construct(JNIEnv* env, const bool direct, + const size_t capacity, + jclass jbytebuffer_clazz = nullptr) { + return constructWith(env, direct, nullptr, capacity, jbytebuffer_clazz); + } + + static jobject constructWith(JNIEnv* env, const bool direct, const char* buf, + const size_t capacity, + jclass jbytebuffer_clazz = nullptr) { + if (direct) { + bool allocated = false; + if (buf == nullptr) { + buf = new char[capacity]; + allocated = true; + } + jobject jbuf = env->NewDirectByteBuffer(const_cast(buf), + static_cast(capacity)); + if (jbuf == nullptr) { + // exception occurred + if (allocated) { + delete[] static_cast(buf); + } + return nullptr; + } + return jbuf; + } else { + const jclass jclazz = + jbytebuffer_clazz == nullptr ? getJClass(env) : jbytebuffer_clazz; + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + const jmethodID jmid_allocate = + getAllocateMethodId(env, jbytebuffer_clazz); + if (jmid_allocate == nullptr) { + // exception occurred accessing class, or NoSuchMethodException or + // OutOfMemoryError + return nullptr; + } + const jobject jbuf = env->CallStaticObjectMethod( + jclazz, jmid_allocate, static_cast(capacity)); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + // set buffer data? 
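// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] Allocating a heap
// ByteBuffer for a Java caller via ByteBufferJni::construct above
// (hypothetical use; the direct=true branch instead wraps native memory
// with NewDirectByteBuffer):
inline jobject allocate_heap_buffer_sketch(JNIEnv* env, size_t capacity) {
  return ROCKSDB_NAMESPACE::ByteBufferJni::construct(env, /*direct=*/false,
                                                     capacity);
}
// ---------------------------------------------------------------------------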
+ if (buf != nullptr) { + jbyteArray jarray = array(env, jbuf, jbytebuffer_clazz); + if (jarray == nullptr) { + // exception occurred + env->DeleteLocalRef(jbuf); + return nullptr; + } + + jboolean is_copy = JNI_FALSE; + jbyte* ja = reinterpret_cast( + env->GetPrimitiveArrayCritical(jarray, &is_copy)); + if (ja == nullptr) { + // exception occurred + env->DeleteLocalRef(jarray); + env->DeleteLocalRef(jbuf); + return nullptr; + } + + memcpy(ja, const_cast(buf), capacity); + + env->ReleasePrimitiveArrayCritical(jarray, ja, 0); + + env->DeleteLocalRef(jarray); + } + + return jbuf; + } + } + + static jbyteArray array(JNIEnv* env, const jobject& jbyte_buffer, + jclass jbytebuffer_clazz = nullptr) { + const jmethodID mid = getArrayMethodId(env, jbytebuffer_clazz); + if (mid == nullptr) { + // exception occurred accessing class, or NoSuchMethodException or + // OutOfMemoryError + return nullptr; + } + const jobject jarray = env->CallObjectMethod(jbyte_buffer, mid); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + return static_cast(jarray); + } +}; + +// The portal class for java.lang.Integer +class IntegerJni : public JavaClass { + public: + /** + * Get the Java Class java.lang.Integer + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/lang/Integer"); + } + + static jobject valueOf(JNIEnv* env, jint jprimitive_int) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = + env->GetStaticMethodID(jclazz, "valueOf", "(I)Ljava/lang/Integer;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + const jobject jinteger_obj = + env->CallStaticObjectMethod(jclazz, mid, jprimitive_int); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + return jinteger_obj; + } +}; + +// The portal class for java.lang.Long +class LongJni : public JavaClass { + public: + /** + * Get the Java Class java.lang.Long + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/lang/Long"); + } + + static jobject valueOf(JNIEnv* env, jlong jprimitive_long) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = + env->GetStaticMethodID(jclazz, "valueOf", "(J)Ljava/lang/Long;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + const jobject jlong_obj = + env->CallStaticObjectMethod(jclazz, mid, jprimitive_long); + if (env->ExceptionCheck()) { + // exception occurred + return nullptr; + } + + return jlong_obj; + } +}; + +// The portal class for java.lang.StringBuilder +class StringBuilderJni : public JavaClass { + public: + /** + * Get the Java Class java.lang.StringBuilder + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, 
ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "java/lang/StringBuilder"); + } + + /** + * Get the Java Method: StringBuilder#append + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getListAddMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID( + jclazz, "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;"); + assert(mid != nullptr); + return mid; + } + + /** + * Appends a C-style string to a StringBuilder + * + * @param env A pointer to the Java environment + * @param jstring_builder Reference to a java.lang.StringBuilder + * @param c_str A C-style string to append to the StringBuilder + * + * @return A reference to the updated StringBuilder, or a nullptr if + * an exception occurs + */ + static jobject append(JNIEnv* env, jobject jstring_builder, + const char* c_str) { + jmethodID mid = getListAddMethodId(env); + if (mid == nullptr) { + // exception occurred accessing class or method + return nullptr; + } + + jstring new_value_str = env->NewStringUTF(c_str); + if (new_value_str == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jobject jresult_string_builder = + env->CallObjectMethod(jstring_builder, mid, new_value_str); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(new_value_str); + return nullptr; + } + + return jresult_string_builder; + } +}; + +// various utility functions for working with RocksDB and JNI +class JniUtil { + public: + /** + * Detect if jlong overflows size_t + * + * @param jvalue the jlong value + * + * @return + */ + inline static Status check_if_jlong_fits_size_t(const jlong& jvalue) { + Status s = Status::OK(); + if (static_cast(jvalue) > std::numeric_limits::max()) { + s = Status::InvalidArgument(Slice("jlong overflows 32 bit value.")); + } + return s; + } + + /** + * Obtains a reference to the JNIEnv from + * the JVM + * + * If the current thread is not attached to the JavaVM + * then it will be attached so as to retrieve the JNIEnv + * + * If a thread is attached, it must later be manually + * released by calling JavaVM::DetachCurrentThread. 
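// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch]
// JniUtil::check_if_jlong_fits_size_t above guards entry points that receive
// sizes as a Java long. A hypothetical use before a narrowing cast:
inline ROCKSDB_NAMESPACE::Status checked_size_cast_sketch(jlong jsize,
                                                          size_t* out) {
  ROCKSDB_NAMESPACE::Status s =
      ROCKSDB_NAMESPACE::JniUtil::check_if_jlong_fits_size_t(jsize);
  if (s.ok()) {
    *out = static_cast<size_t>(jsize);  // safe: checked above
  }
  return s;
}
// ---------------------------------------------------------------------------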
+ * This can be handled by always matching calls to this + * function with calls to {@link JniUtil::releaseJniEnv(JavaVM*, jboolean)} + * + * @param jvm (IN) A pointer to the JavaVM instance + * @param attached (OUT) A pointer to a boolean which + * will be set to JNI_TRUE if we had to attach the thread + * + * @return A pointer to the JNIEnv or nullptr if a fatal error + * occurs and the JNIEnv cannot be retrieved + */ + static JNIEnv* getJniEnv(JavaVM* jvm, jboolean* attached) { + assert(jvm != nullptr); + + JNIEnv* env; + const jint env_rs = + jvm->GetEnv(reinterpret_cast(&env), JNI_VERSION_1_6); + + if (env_rs == JNI_OK) { + // current thread is already attached, return the JNIEnv + *attached = JNI_FALSE; + return env; + } else if (env_rs == JNI_EDETACHED) { + // current thread is not attached, attempt to attach + const jint rs_attach = + jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); + if (rs_attach == JNI_OK) { + *attached = JNI_TRUE; + return env; + } else { + // error, could not attach the thread + std::cerr << "JniUtil::getJniEnv - Fatal: could not attach current " + "thread to JVM!" + << std::endl; + return nullptr; + } + } else if (env_rs == JNI_EVERSION) { + // error, JDK does not support JNI_VERSION_1_6+ + std::cerr + << "JniUtil::getJniEnv - Fatal: JDK does not support JNI_VERSION_1_6" + << std::endl; + return nullptr; + } else { + std::cerr << "JniUtil::getJniEnv - Fatal: Unknown error: env_rs=" + << env_rs << std::endl; + return nullptr; + } + } + + /** + * Counterpart to {@link JniUtil::getJniEnv(JavaVM*, jboolean*)} + * + * Detachess the current thread from the JVM if it was previously + * attached + * + * @param jvm (IN) A pointer to the JavaVM instance + * @param attached (IN) JNI_TRUE if we previously had to attach the thread + * to the JavaVM to get the JNIEnv + */ + static void releaseJniEnv(JavaVM* jvm, jboolean& attached) { + assert(jvm != nullptr); + if (attached == JNI_TRUE) { + const jint rs_detach = jvm->DetachCurrentThread(); + assert(rs_detach == JNI_OK); + if (rs_detach != JNI_OK) { + std::cerr << "JniUtil::getJniEnv - Warn: Unable to detach current " + "thread from JVM!" 
+ << std::endl; + } + } + } + + /** + * Copies a Java String[] to a C++ std::vector + * + * @param env (IN) A pointer to the java environment + * @param jss (IN) The Java String array to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError or ArrayIndexOutOfBoundsException + * exception occurs + * + * @return A std::vector containing copies of the Java strings + */ + static std::vector copyStrings(JNIEnv* env, jobjectArray jss, + jboolean* has_exception) { + return ROCKSDB_NAMESPACE::JniUtil::copyStrings( + env, jss, env->GetArrayLength(jss), has_exception); + } + + /** + * Copies a Java String[] to a C++ std::vector + * + * @param env (IN) A pointer to the java environment + * @param jss (IN) The Java String array to copy + * @param jss_len (IN) The length of the Java String array to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError or ArrayIndexOutOfBoundsException + * exception occurs + * + * @return A std::vector containing copies of the Java strings + */ + static std::vector copyStrings(JNIEnv* env, jobjectArray jss, + const jsize jss_len, + jboolean* has_exception) { + std::vector strs; + strs.reserve(jss_len); + for (jsize i = 0; i < jss_len; i++) { + jobject js = env->GetObjectArrayElement(jss, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + *has_exception = JNI_TRUE; + return strs; + } + + jstring jstr = static_cast(js); + const char* str = env->GetStringUTFChars(jstr, nullptr); + if (str == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(js); + *has_exception = JNI_TRUE; + return strs; + } + + strs.push_back(std::string(str)); + + env->ReleaseStringUTFChars(jstr, str); + env->DeleteLocalRef(js); + } + + *has_exception = JNI_FALSE; + return strs; + } + + /** + * Copies a jstring to a C-style null-terminated byte string + * and releases the original jstring + * + * The jstring is copied as UTF-8 + * + * If an exception occurs, then JNIEnv::ExceptionCheck() + * will have been called + * + * @param env (IN) A pointer to the java environment + * @param js (IN) The java string to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError exception occurs + * + * @return A pointer to the copied string, or a + * nullptr if has_exception == JNI_TRUE + */ + static std::unique_ptr copyString(JNIEnv* env, jstring js, + jboolean* has_exception) { + const char* utf = env->GetStringUTFChars(js, nullptr); + if (utf == nullptr) { + // exception thrown: OutOfMemoryError + env->ExceptionCheck(); + *has_exception = JNI_TRUE; + return nullptr; + } else if (env->ExceptionCheck()) { + // exception thrown + env->ReleaseStringUTFChars(js, utf); + *has_exception = JNI_TRUE; + return nullptr; + } + + const jsize utf_len = env->GetStringUTFLength(js); + std::unique_ptr str( + new char[utf_len + + 1]); // Note: + 1 is needed for the c_str null terminator + std::strcpy(str.get(), utf); + env->ReleaseStringUTFChars(js, utf); + *has_exception = JNI_FALSE; + return str; + } + + /** + * Copies a jstring to a std::string + * and releases the original jstring + * + * If an exception occurs, then JNIEnv::ExceptionCheck() + * will have been called + * + * @param env (IN) A pointer to the java environment + * @param js (IN) The java string to copy + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError exception occurs + * + * @return A std:string copy of the jstring, or an + * empty std::string if has_exception == JNI_TRUE + */ 
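// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] The getJniEnv /
// releaseJniEnv pair above is the standard pattern for callbacks that arrive
// on RocksDB background threads (jvm is a JavaVM* cached at construction;
// the callback body is hypothetical):
inline void callback_from_native_thread_sketch(JavaVM* jvm) {
  jboolean attached_thread = JNI_FALSE;
  JNIEnv* env = ROCKSDB_NAMESPACE::JniUtil::getJniEnv(jvm, &attached_thread);
  if (env == nullptr) {
    return;  // fatal: no JNIEnv available
  }

  // ... call back into Java here ...

  // detaches the thread only if getJniEnv had to attach it
  ROCKSDB_NAMESPACE::JniUtil::releaseJniEnv(jvm, attached_thread);
}
// ---------------------------------------------------------------------------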
+ static std::string copyStdString(JNIEnv* env, jstring js, + jboolean* has_exception) { + const char* utf = env->GetStringUTFChars(js, nullptr); + if (utf == nullptr) { + // exception thrown: OutOfMemoryError + env->ExceptionCheck(); + *has_exception = JNI_TRUE; + return std::string(); + } else if (env->ExceptionCheck()) { + // exception thrown + env->ReleaseStringUTFChars(js, utf); + *has_exception = JNI_TRUE; + return std::string(); + } + + std::string name(utf); + env->ReleaseStringUTFChars(js, utf); + *has_exception = JNI_FALSE; + return name; + } + + /** + * Copies bytes from a std::string to a jByteArray + * + * @param env A pointer to the java environment + * @param bytes The bytes to copy + * + * @return the Java byte[], or nullptr if an exception occurs + * + * @throws RocksDBException thrown + * if memory size to copy exceeds general java specific array size + * limitation. + */ + static jbyteArray copyBytes(JNIEnv* env, std::string bytes) { + return createJavaByteArrayWithSizeCheck(env, bytes.c_str(), bytes.size()); + } + + /** + * Given a Java byte[][] which is an array of java.lang.Strings + * where each String is a byte[], the passed function `string_fn` + * will be called on each String, the result is the collected by + * calling the passed function `collector_fn` + * + * @param env (IN) A pointer to the java environment + * @param jbyte_strings (IN) A Java array of Strings expressed as bytes + * @param string_fn (IN) A transform function to call for each String + * @param collector_fn (IN) A collector which is called for the result + * of each `string_fn` + * @param has_exception (OUT) will be set to JNI_TRUE + * if an ArrayIndexOutOfBoundsException or OutOfMemoryError + * exception occurs + */ + template + static void byteStrings(JNIEnv* env, jobjectArray jbyte_strings, + std::function string_fn, + std::function collector_fn, + jboolean* has_exception) { + const jsize jlen = env->GetArrayLength(jbyte_strings); + + for (jsize i = 0; i < jlen; i++) { + jobject jbyte_string_obj = env->GetObjectArrayElement(jbyte_strings, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + *has_exception = JNI_TRUE; // signal error + return; + } + + jbyteArray jbyte_string_ary = + reinterpret_cast(jbyte_string_obj); + T result = byteString(env, jbyte_string_ary, string_fn, has_exception); + + env->DeleteLocalRef(jbyte_string_obj); + + if (*has_exception == JNI_TRUE) { + // exception thrown: OutOfMemoryError + return; + } + + collector_fn(i, result); + } + + *has_exception = JNI_FALSE; + } + + /** + * Given a Java String which is expressed as a Java Byte Array byte[], + * the passed function `string_fn` will be called on the String + * and the result returned + * + * @param env (IN) A pointer to the java environment + * @param jbyte_string_ary (IN) A Java String expressed in bytes + * @param string_fn (IN) A transform function to call on the String + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError exception occurs + */ + template + static T byteString(JNIEnv* env, jbyteArray jbyte_string_ary, + std::function string_fn, + jboolean* has_exception) { + const jsize jbyte_string_len = env->GetArrayLength(jbyte_string_ary); + return byteString(env, jbyte_string_ary, jbyte_string_len, string_fn, + has_exception); + } + + /** + * Given a Java String which is expressed as a Java Byte Array byte[], + * the passed function `string_fn` will be called on the String + * and the result returned + * + * @param env (IN) A pointer to the 
java environment + * @param jbyte_string_ary (IN) A Java String expressed in bytes + * @param jbyte_string_len (IN) The length of the Java String + * expressed in bytes + * @param string_fn (IN) A transform function to call on the String + * @param has_exception (OUT) will be set to JNI_TRUE + * if an OutOfMemoryError exception occurs + */ + template + static T byteString(JNIEnv* env, jbyteArray jbyte_string_ary, + const jsize jbyte_string_len, + std::function string_fn, + jboolean* has_exception) { + jbyte* jbyte_string = env->GetByteArrayElements(jbyte_string_ary, nullptr); + if (jbyte_string == nullptr) { + // exception thrown: OutOfMemoryError + *has_exception = JNI_TRUE; + return nullptr; // signal error + } + + T result = + string_fn(reinterpret_cast(jbyte_string), jbyte_string_len); + + env->ReleaseByteArrayElements(jbyte_string_ary, jbyte_string, JNI_ABORT); + + *has_exception = JNI_FALSE; + return result; + } + + /** + * Converts a std::vector to a Java byte[][] where each Java String + * is expressed as a Java Byte Array byte[]. + * + * @param env A pointer to the java environment + * @param strings A vector of Strings + * + * @return A Java array of Strings expressed as bytes, + * or nullptr if an exception is thrown + */ + static jobjectArray stringsBytes(JNIEnv* env, + std::vector strings) { + jclass jcls_ba = ByteJni::getArrayJClass(env); + if (jcls_ba == nullptr) { + // exception occurred + return nullptr; + } + + const jsize len = static_cast(strings.size()); + + jobjectArray jbyte_strings = env->NewObjectArray(len, jcls_ba, nullptr); + if (jbyte_strings == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + for (jsize i = 0; i < len; i++) { + std::string* str = &strings[i]; + const jsize str_len = static_cast(str->size()); + + jbyteArray jbyte_string_ary = env->NewByteArray(str_len); + if (jbyte_string_ary == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jbyte_strings); + return nullptr; + } + + env->SetByteArrayRegion( + jbyte_string_ary, 0, str_len, + const_cast(reinterpret_cast(str->c_str()))); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jbyte_string_ary); + env->DeleteLocalRef(jbyte_strings); + return nullptr; + } + + env->SetObjectArrayElement(jbyte_strings, i, jbyte_string_ary); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + // or ArrayStoreException + env->DeleteLocalRef(jbyte_string_ary); + env->DeleteLocalRef(jbyte_strings); + return nullptr; + } + + env->DeleteLocalRef(jbyte_string_ary); + } + + return jbyte_strings; + } + + /** + * Converts a std::vector to a Java String[]. 
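// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] byteString hands the
// pinned byte[] contents to a caller-supplied function. Copying a
// byte[]-encoded string into a std::string (hypothetical helper):
inline std::string read_byte_string_sketch(JNIEnv* env, jbyteArray jbytes,
                                           jboolean* has_exception) {
  return ROCKSDB_NAMESPACE::JniUtil::byteString<std::string>(
      env, jbytes,
      [](const char* str, const size_t len) { return std::string(str, len); },
      has_exception);
}
// ---------------------------------------------------------------------------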
+ * + * @param env A pointer to the java environment + * @param strings A vector of Strings + * + * @return A Java array of Strings, + * or nullptr if an exception is thrown + */ + static jobjectArray toJavaStrings(JNIEnv* env, + const std::vector* strings) { + jclass jcls_str = env->FindClass("java/lang/String"); + if (jcls_str == nullptr) { + // exception occurred + return nullptr; + } + + const jsize len = static_cast(strings->size()); + + jobjectArray jstrings = env->NewObjectArray(len, jcls_str, nullptr); + if (jstrings == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + for (jsize i = 0; i < len; i++) { + const std::string* str = &((*strings)[i]); + jstring js = ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, str); + if (js == nullptr) { + env->DeleteLocalRef(jstrings); + return nullptr; + } + + env->SetObjectArrayElement(jstrings, i, js); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + // or ArrayStoreException + env->DeleteLocalRef(js); + env->DeleteLocalRef(jstrings); + return nullptr; + } + } + + return jstrings; + } + + /** + * Creates a Java UTF String from a C++ std::string + * + * @param env A pointer to the java environment + * @param string the C++ std::string + * @param treat_empty_as_null true if empty strings should be treated as null + * + * @return the Java UTF string, or nullptr if the provided string + * is null (or empty and treat_empty_as_null is set), or if an + * exception occurs allocating the Java String. + */ + static jstring toJavaString(JNIEnv* env, const std::string* string, + const bool treat_empty_as_null = false) { + if (string == nullptr) { + return nullptr; + } + + if (treat_empty_as_null && string->empty()) { + return nullptr; + } + + return env->NewStringUTF(string->c_str()); + } + + /** + * Copies bytes to a new jByteArray with the check of java array size + * limitation. + * + * @param bytes pointer to memory to copy to a new jByteArray + * @param size number of bytes to copy + * + * @return the Java byte[], or nullptr if an exception occurs + * + * @throws RocksDBException thrown + * if memory size to copy exceeds general java array size limitation to + * avoid overflow. 
+ */ + static jbyteArray createJavaByteArrayWithSizeCheck(JNIEnv* env, + const char* bytes, + const size_t size) { + // Limitation for java array size is vm specific + // In general it cannot exceed Integer.MAX_VALUE (2^31 - 1) + // Current HotSpot VM limitation for array size is Integer.MAX_VALUE - 5 + // (2^31 - 1 - 5) It means that the next call to env->NewByteArray can still + // end with OutOfMemoryError("Requested array size exceeds VM limit") coming + // from VM + static const size_t MAX_JARRAY_SIZE = (static_cast(1)) << 31; + if (size > MAX_JARRAY_SIZE) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, "Requested array size exceeds VM limit"); + return nullptr; + } + + const jsize jlen = static_cast(size); + jbyteArray jbytes = env->NewByteArray(jlen); + if (jbytes == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetByteArrayRegion( + jbytes, 0, jlen, + const_cast(reinterpret_cast(bytes))); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jbytes); + return nullptr; + } + + return jbytes; + } + + /** + * Copies bytes from a ROCKSDB_NAMESPACE::Slice to a jByteArray + * + * @param env A pointer to the java environment + * @param bytes The bytes to copy + * + * @return the Java byte[] or nullptr if an exception occurs + * + * @throws RocksDBException thrown + * if memory size to copy exceeds general java specific array size + * limitation. + */ + static jbyteArray copyBytes(JNIEnv* env, const Slice& bytes) { + return createJavaByteArrayWithSizeCheck(env, bytes.data(), bytes.size()); + } + + /* + * Helper for operations on a key and value + * for example WriteBatch->Put + * + * TODO(AR) could be used for RocksDB->Put etc. + */ + static std::unique_ptr kv_op( + std::function + op, + JNIEnv* env, jobject /*jobj*/, jbyteArray jkey, jint jkey_len, + jbyteArray jvalue, jint jvalue_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jbyte* value = env->GetByteArrayElements(jvalue, nullptr); + if (env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + if (key != nullptr) { + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + } + return nullptr; + } + + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast(value), + jvalue_len); + + auto status = op(key_slice, value_slice); + + if (value != nullptr) { + env->ReleaseByteArrayElements(jvalue, value, JNI_ABORT); + } + if (key != nullptr) { + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + } + + return std::unique_ptr( + new ROCKSDB_NAMESPACE::Status(status)); + } + + /* + * Helper for operations on a key + * for example WriteBatch->Delete + * + * TODO(AR) could be used for RocksDB->Delete etc. 
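// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] kv_op factors the
// GetByteArrayElements / ReleaseByteArrayElements boilerplate out of
// put-style bindings. A hypothetical WriteBatch put body (the real bindings
// live in rocksjni/write_batch.cc; rocksdb/write_batch.h assumed included):
inline void example_write_batch_put(JNIEnv* env, jobject jobj,
                                    jlong jwb_handle, jbyteArray jkey,
                                    jint jkey_len, jbyteArray jval,
                                    jint jval_len) {
  auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
  auto put = [&wb](ROCKSDB_NAMESPACE::Slice key,
                   ROCKSDB_NAMESPACE::Slice value) {
    return wb->Put(key, value);
  };
  std::unique_ptr<ROCKSDB_NAMESPACE::Status> status =
      ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jval,
                                        jval_len);
  if (status != nullptr && !status->ok()) {
    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status);
  }
}
// ---------------------------------------------------------------------------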
+   */
+  static std::unique_ptr<ROCKSDB_NAMESPACE::Status> k_op(
+      std::function<ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Slice)> op,
+      JNIEnv* env, jobject /*jobj*/, jbyteArray jkey, jint jkey_len) {
+    jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+    if (env->ExceptionCheck()) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+
+    ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+                                       jkey_len);
+
+    auto status = op(key_slice);
+
+    if (key != nullptr) {
+      env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+    }
+
+    return std::unique_ptr<ROCKSDB_NAMESPACE::Status>(
+        new ROCKSDB_NAMESPACE::Status(status));
+  }
+
+  /*
+   * Helper for operations on a key which is a region of an array
+   * Used to extract the common code from seek/seekForPrev.
+   * Possible that it can be generalised from that.
+   *
+   * We use GetByteArrayRegion to copy the key region of the whole array into
+   * a char[]. We suspect this is not much slower than GetByteArrayElements,
+   * which probably copies anyway.
+   */
+  static void k_op_region(std::function<void(ROCKSDB_NAMESPACE::Slice&)> op,
+                          JNIEnv* env, jbyteArray jkey, jint jkey_off,
+                          jint jkey_len) {
+    const std::unique_ptr<char[]> key(new char[jkey_len]);
+    if (key == nullptr) {
+      jclass oom_class = env->FindClass("java/lang/OutOfMemoryError");
+      env->ThrowNew(oom_class,
+                    "Memory allocation failed in RocksDB JNI function");
+      return;
+    }
+    env->GetByteArrayRegion(jkey, jkey_off, jkey_len,
+                            reinterpret_cast<jbyte*>(key.get()));
+    if (env->ExceptionCheck()) {
+      // exception thrown: OutOfMemoryError
+      return;
+    }
+
+    ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key.get()),
+                                       jkey_len);
+    op(key_slice);
+  }
+
+  /*
+   * Helper for operations on a value
+   * for example WriteBatchWithIndex->GetFromBatch
+   */
+  static jbyteArray v_op(std::function<ROCKSDB_NAMESPACE::Status(
+                             ROCKSDB_NAMESPACE::Slice, std::string*)>
+                             op,
+                         JNIEnv* env, jbyteArray jkey, jint jkey_len) {
+    jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+    if (env->ExceptionCheck()) {
+      // exception thrown: OutOfMemoryError
+      return nullptr;
+    }
+
+    ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+                                       jkey_len);
+
+    std::string value;
+    ROCKSDB_NAMESPACE::Status s = op(key_slice, &value);
+
+    if (key != nullptr) {
+      env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+    }
+
+    if (s.IsNotFound()) {
+      return nullptr;
+    }
+
+    if (s.ok()) {
+      jbyteArray jret_value =
+          env->NewByteArray(static_cast<jsize>(value.size()));
+      if (jret_value == nullptr) {
+        // exception thrown: OutOfMemoryError
+        return nullptr;
+      }
+
+      env->SetByteArrayRegion(
+          jret_value, 0, static_cast<jsize>(value.size()),
+          const_cast<jbyte*>(reinterpret_cast<const jbyte*>(value.c_str())));
+      if (env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        if (jret_value != nullptr) {
+          env->DeleteLocalRef(jret_value);
+        }
+        return nullptr;
+      }
+
+      return jret_value;
+    }
+
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+    return nullptr;
+  }
+
+  /**
+   * Creates a vector of C++ pointers from
+   * a Java array of C++ pointer addresses.
+   *
+   * @param env (IN) A pointer to the java environment
+   * @param pointers (IN) A Java array of C++ pointer addresses
+   * @param has_exception (OUT) will be set to JNI_TRUE
+   *     if an ArrayIndexOutOfBoundsException or OutOfMemoryError
+   *     exception occurs.
+   *
+   * @return A vector of C++ pointers.
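// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] v_op's contract, as a
// hypothetical WriteBatchWithIndex#getFromBatch body: the value bytes on
// success, nullptr for NotFound, and a thrown RocksDBException for other
// errors (utilities/write_batch_with_index.h assumed included):
inline jbyteArray example_get_from_batch(JNIEnv* env, jlong jwbwi_handle,
                                         jlong jdbopt_handle, jbyteArray jkey,
                                         jint jkey_len) {
  auto* wbwi = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(
      jwbwi_handle);
  auto* dbopt =
      reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdbopt_handle);
  auto getter = [&wbwi, &dbopt](ROCKSDB_NAMESPACE::Slice key,
                                std::string* value) {
    return wbwi->GetFromBatch(*dbopt, key, value);
  };
  return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len);
}
// ---------------------------------------------------------------------------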
+ */ + template + static std::vector fromJPointers(JNIEnv* env, jlongArray jptrs, + jboolean* has_exception) { + const jsize jptrs_len = env->GetArrayLength(jptrs); + std::vector ptrs; + jlong* jptr = env->GetLongArrayElements(jptrs, nullptr); + if (jptr == nullptr) { + // exception thrown: OutOfMemoryError + *has_exception = JNI_TRUE; + return ptrs; + } + ptrs.reserve(jptrs_len); + for (jsize i = 0; i < jptrs_len; i++) { + ptrs.push_back(reinterpret_cast(jptr[i])); + } + env->ReleaseLongArrayElements(jptrs, jptr, JNI_ABORT); + return ptrs; + } + + /** + * Creates a Java array of C++ pointer addresses + * from a vector of C++ pointers. + * + * @param env (IN) A pointer to the java environment + * @param pointers (IN) A vector of C++ pointers + * @param has_exception (OUT) will be set to JNI_TRUE + * if an ArrayIndexOutOfBoundsException or OutOfMemoryError + * exception occurs + * + * @return Java array of C++ pointer addresses. + */ + template + static jlongArray toJPointers(JNIEnv* env, const std::vector& pointers, + jboolean* has_exception) { + const jsize len = static_cast(pointers.size()); + std::unique_ptr results(new jlong[len]); + std::transform( + pointers.begin(), pointers.end(), results.get(), + [](T* pointer) -> jlong { return GET_CPLUSPLUS_POINTER(pointer); }); + + jlongArray jpointers = env->NewLongArray(len); + if (jpointers == nullptr) { + // exception thrown: OutOfMemoryError + *has_exception = JNI_TRUE; + return nullptr; + } + + env->SetLongArrayRegion(jpointers, 0, len, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + *has_exception = JNI_TRUE; + env->DeleteLocalRef(jpointers); + return nullptr; + } + + *has_exception = JNI_FALSE; + + return jpointers; + } + + /* + * Helper for operations on a key and value + * for example WriteBatch->Put + * + * TODO(AR) could be extended to cover returning ROCKSDB_NAMESPACE::Status + * from `op` and used for RocksDB->Put etc. + */ + static void kv_op_direct( + std::function + op, + JNIEnv* env, jobject jkey, jint jkey_off, jint jkey_len, jobject jval, + jint jval_off, jint jval_len) { + char* key = reinterpret_cast(env->GetDirectBufferAddress(jkey)); + if (key == nullptr || + env->GetDirectBufferCapacity(jkey) < (jkey_off + jkey_len)) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, + "Invalid key argument"); + return; + } + + char* value = reinterpret_cast(env->GetDirectBufferAddress(jval)); + if (value == nullptr || + env->GetDirectBufferCapacity(jval) < (jval_off + jval_len)) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, "Invalid value argument"); + return; + } + + key += jkey_off; + value += jval_off; + + ROCKSDB_NAMESPACE::Slice key_slice(key, jkey_len); + ROCKSDB_NAMESPACE::Slice value_slice(value, jval_len); + + op(key_slice, value_slice); + } + + /* + * Helper for operations on a key and value + * for example WriteBatch->Delete + * + * TODO(AR) could be extended to cover returning ROCKSDB_NAMESPACE::Status + * from `op` and used for RocksDB->Delete etc. 
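// ---------------------------------------------------------------------------
// [Editorial sketch - not part of the upstream patch] kv_op_direct validates
// the direct-ByteBuffer address and capacity before slicing, so the op only
// ever sees checked memory. A hypothetical direct-buffer put:
inline void example_put_direct(JNIEnv* env, jlong jwb_handle, jobject jkey,
                               jint jkey_off, jint jkey_len, jobject jval,
                               jint jval_off, jint jval_len) {
  auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatch*>(jwb_handle);
  auto put = [&wb](ROCKSDB_NAMESPACE::Slice& key,
                   ROCKSDB_NAMESPACE::Slice& value) { wb->Put(key, value); };
  ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(put, env, jkey, jkey_off, jkey_len,
                                           jval, jval_off, jval_len);
}
// ---------------------------------------------------------------------------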
+
+  /*
+   * Helper for operations on a key
+   * for example WriteBatch->Delete
+   *
+   * TODO(AR) could be extended to cover returning ROCKSDB_NAMESPACE::Status
+   * from `op` and used for RocksDB->Delete etc.
+   */
+  static void k_op_direct(std::function<void(ROCKSDB_NAMESPACE::Slice&)> op,
+                          JNIEnv* env, jobject jkey, jint jkey_off,
+                          jint jkey_len) {
+    char* key = reinterpret_cast<char*>(env->GetDirectBufferAddress(jkey));
+    if (key == nullptr ||
+        env->GetDirectBufferCapacity(jkey) < (jkey_off + jkey_len)) {
+      ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env,
+                                                       "Invalid key argument");
+      return;
+    }
+
+    key += jkey_off;
+
+    ROCKSDB_NAMESPACE::Slice key_slice(key, jkey_len);
+
+    return op(key_slice);
+  }
+
+  /*
+   * Copies at most `jtarget_len` bytes of `source` into the direct buffer
+   * `jtarget`, and returns the full size of `source` so that callers can
+   * detect truncation.
+   */
+  template <class T>
+  static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget,
+                           jint jtarget_off, jint jtarget_len) {
+    char* target =
+        reinterpret_cast<char*>(env->GetDirectBufferAddress(jtarget));
+    if (target == nullptr ||
+        env->GetDirectBufferCapacity(jtarget) < (jtarget_off + jtarget_len)) {
+      ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+          env, "Invalid target argument");
+      return 0;
+    }
+
+    target += jtarget_off;
+
+    const jint cvalue_len = static_cast<jint>(source.size());
+    const jint length = std::min(jtarget_len, cvalue_len);
+
+    memcpy(target, source.data(), length);
+
+    return cvalue_len;
+  }
+};
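+
+/*
+ * Illustrative usage (an editor's sketch, not part of the original sources):
+ * callers of JniUtil::copyToDirect detect truncation by comparing the
+ * returned full source size with the capacity they offered, e.g. for a
+ * pinned value:
+ *
+ *   ROCKSDB_NAMESPACE::PinnableSlice pinnable;
+ *   // ... fill `pinnable`, e.g. via DB::Get ...
+ *   const jint full_len = ROCKSDB_NAMESPACE::JniUtil::copyToDirect(
+ *       env, pinnable, jtarget, jtarget_off, jtarget_len);
+ *   const bool truncated = full_len > jtarget_len;
+ */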
+
+class MapJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class java.util.Map
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "java/util/Map");
+  }
+
+  /**
+   * Get the Java Method: Map#put
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getMapPutMethodId(JNIEnv* env) {
+    jclass jlist_clazz = getJClass(env);
+    if (jlist_clazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(
+        jlist_clazz, "put",
+        "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");
+    assert(mid != nullptr);
+    return mid;
+  }
+};
+
+class HashMapJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class java.util.HashMap
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "java/util/HashMap");
+  }
+
+  /**
+   * Create a new Java java.util.HashMap object.
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return A reference to a Java java.util.HashMap object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env, const uint32_t initial_capacity = 16) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(I)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    jobject jhash_map =
+        env->NewObject(jclazz, mid, static_cast<jint>(initial_capacity));
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * A function which maps a std::pair<K, V> to a std::pair<JK, JV>
+   *
+   * @return Either a pointer to a std::pair<jobject, jobject>, or nullptr
+   *     if an error occurs during the mapping
+   */
+  template <typename K, typename V, typename JK, typename JV>
+  using FnMapKV =
+      std::function<std::unique_ptr<std::pair<JK, JV>>(const std::pair<K, V>&)>;
+
+  // template <class I, typename K, typename V, typename K1, typename V1,
+  // typename std::enable_if<std::is_same<typename std::iterator_traits<
+  // I>::value_type, std::pair<const K, V>>::value,
+  // int32_t>::type = 0> static void putAll(JNIEnv* env, const jobject
+  // jhash_map, I iterator, const FnMapKV<const K, V, K1, V1> &fn_map_kv) {
+  /**
+   * Returns true if it succeeds, false if an error occurs
+   */
+  template <class iterator_type, typename K, typename V>
+  static bool putAll(JNIEnv* env, const jobject jhash_map,
+                     iterator_type iterator, iterator_type end,
+                     const FnMapKV<K, V, jobject, jobject>& fn_map_kv) {
+    const jmethodID jmid_put =
+        ROCKSDB_NAMESPACE::MapJni::getMapPutMethodId(env);
+    if (jmid_put == nullptr) {
+      return false;
+    }
+
+    for (auto it = iterator; it != end; ++it) {
+      const std::unique_ptr<std::pair<jobject, jobject>> result =
+          fn_map_kv(*it);
+      if (result == nullptr) {
+        // an error occurred during fn_map_kv
+        return false;
+      }
+      env->CallObjectMethod(jhash_map, jmid_put, result->first, result->second);
+      if (env->ExceptionCheck()) {
+        // exception occurred
+        env->DeleteLocalRef(result->second);
+        env->DeleteLocalRef(result->first);
+        return false;
+      }
+
+      // release local references
+      env->DeleteLocalRef(result->second);
+      env->DeleteLocalRef(result->first);
+    }
+
+    return true;
+  }
+
+  /**
+   * Creates a java.util.Map<String, String> from a
+   * std::map<std::string, std::string>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the Cpp map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an
+   *     exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env,
+                            const std::map<std::string, std::string>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+        const std::string, const std::string, jobject, jobject>
+        fn_map_kv =
+            [env](const std::pair<const std::string, const std::string>& kv) {
+              jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+                  env, &(kv.first), false);
+              if (env->ExceptionCheck()) {
+                // an error occurred
+                return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+              }
+
+              jstring jvalue = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+                  env, &(kv.second), true);
+              if (env->ExceptionCheck()) {
+                // an error occurred
+                env->DeleteLocalRef(jkey);
+                return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+              }
+
+              return std::unique_ptr<std::pair<jobject, jobject>>(
+                  new std::pair<jobject, jobject>(
+                      static_cast<jobject>(jkey),
+                      static_cast<jobject>(jvalue)));
+            };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * Creates a java.util.Map<String, Integer> from a
+   * std::map<std::string, uint32_t>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the Cpp map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an
+   *     exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env,
+                            const std::map<std::string, uint32_t>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+        const std::string, const uint32_t, jobject, jobject>
+        fn_map_kv =
+            [env](const std::pair<const std::string, const uint32_t>& kv) {
+              jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+                  env, &(kv.first), false);
+              if (env->ExceptionCheck()) {
+                // an error occurred
+                return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+              }
+
+              jobject jvalue = ROCKSDB_NAMESPACE::IntegerJni::valueOf(
+                  env, static_cast<jint>(kv.second));
+              if (env->ExceptionCheck()) {
+                // an error occurred
+                env->DeleteLocalRef(jkey);
+                return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+              }
+
+              return std::unique_ptr<std::pair<jobject, jobject>>(
+                  new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+                                                  jvalue));
+            };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * Creates a java.util.Map<String, Long> from a
+   * std::map<std::string, uint64_t>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the Cpp map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an
+   *     exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env,
+                            const std::map<std::string, uint64_t>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<
+        const std::string, const uint64_t, jobject, jobject>
+        fn_map_kv =
+            [env](const std::pair<const std::string, const uint64_t>& kv) {
+              jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString(
+                  env, &(kv.first), false);
+              if (env->ExceptionCheck()) {
+                // an error occurred
+                return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+              }
+
+              jobject jvalue = ROCKSDB_NAMESPACE::LongJni::valueOf(
+                  env, static_cast<jlong>(kv.second));
+              if (env->ExceptionCheck()) {
+                // an error occurred
+                env->DeleteLocalRef(jkey);
+                return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+              }
+
+              return std::unique_ptr<std::pair<jobject, jobject>>(
+                  new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+                                                  jvalue));
+            };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+
+  /**
+   * Creates a java.util.Map<Integer, Long> from a
+   * std::map<uint32_t, uint64_t>
+   *
+   * @param env A pointer to the Java environment
+   * @param map the Cpp map
+   *
+   * @return a reference to the Java java.util.Map object, or nullptr if an
+   *     exception occurred
+   */
+  static jobject fromCppMap(JNIEnv* env,
+                            const std::map<uint32_t, uint64_t>* map) {
+    if (map == nullptr) {
+      return nullptr;
+    }
+
+    jobject jhash_map = construct(env, static_cast<uint32_t>(map->size()));
+    if (jhash_map == nullptr) {
+      // exception occurred
+      return nullptr;
+    }
+
+    const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV<const uint32_t,
+                                                 const uint64_t, jobject,
+                                                 jobject>
+        fn_map_kv = [env](const std::pair<const uint32_t, const uint64_t>& kv) {
+          jobject jkey = ROCKSDB_NAMESPACE::IntegerJni::valueOf(
+              env, static_cast<jint>(kv.first));
+          if (env->ExceptionCheck()) {
+            // an error occurred
+            return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+          }
+
+          jobject jvalue = ROCKSDB_NAMESPACE::LongJni::valueOf(
+              env, static_cast<jlong>(kv.second));
+          if (env->ExceptionCheck()) {
+            // an error occurred
+            env->DeleteLocalRef(jkey);
+            return std::unique_ptr<std::pair<jobject, jobject>>(nullptr);
+          }
+
+          return std::unique_ptr<std::pair<jobject, jobject>>(
+              new std::pair<jobject, jobject>(static_cast<jobject>(jkey),
+                                              jvalue));
+        };
+
+    if (!putAll(env, jhash_map, map->begin(), map->end(), fn_map_kv)) {
+      // exception occurred
+      return nullptr;
+    }
+
+    return jhash_map;
+  }
+};
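+
+/*
+ * Illustrative usage (an editor's sketch, not part of the original sources):
+ * a JNI getter that surfaces a C++ string map to Java reduces to a single
+ * call once the map has been filled from the native side:
+ *
+ *   std::map<std::string, std::string> props;
+ *   // ... fill `props` from the native object ...
+ *   return ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &props);
+ *
+ * A nullptr return means a Java exception is already pending.
+ */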
+
+// The portal class for org.rocksdb.RocksDB
+class RocksDBJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::DB*, RocksDBJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.RocksDB
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB");
+  }
+};
+
+// The portal class for org.rocksdb.Options
+class OptionsJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::Options*, OptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.Options
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options");
+  }
+};
+
+// The portal class for org.rocksdb.DBOptions
+class DBOptionsJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::DBOptions*, DBOptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.DBOptions
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions");
+  }
+};
+
+// The portal class for org.rocksdb.ColumnFamilyOptions
+class ColumnFamilyOptionsJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::ColumnFamilyOptions*,
+                                ColumnFamilyOptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.ColumnFamilyOptions
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env,
+                                         "org/rocksdb/ColumnFamilyOptions");
+  }
+
+  /**
+   * Create a new Java org.rocksdb.ColumnFamilyOptions object with the same
+   * properties as the provided C++ ROCKSDB_NAMESPACE::ColumnFamilyOptions
+   * object
+   *
+   * @param env A pointer to the Java environment
+   * @param cfoptions A pointer to ROCKSDB_NAMESPACE::ColumnFamilyOptions object
+   *
+   * @return A reference to a Java org.rocksdb.ColumnFamilyOptions object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env, const ColumnFamilyOptions* cfoptions) {
+    auto* cfo = new ROCKSDB_NAMESPACE::ColumnFamilyOptions(*cfoptions);
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    jobject jcfd = env->NewObject(jclazz, mid, GET_CPLUSPLUS_POINTER(cfo));
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+
+    return jcfd;
+  }
+};
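+
+/*
+ * Illustrative usage (an editor's sketch, not part of the original sources):
+ * portal `construct` helpers let callback code hand a copy of a native
+ * options object back to Java:
+ *
+ *   const ROCKSDB_NAMESPACE::ColumnFamilyOptions& cf_opts = ...;
+ *   jobject jcf_opts =
+ *       ROCKSDB_NAMESPACE::ColumnFamilyOptionsJni::construct(env, &cf_opts);
+ *   if (jcf_opts == nullptr) {
+ *     return;  // a Java exception is pending
+ *   }
+ */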
+
+// The portal class for org.rocksdb.WriteOptions
+class WriteOptionsJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::WriteOptions*,
+                                WriteOptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.WriteOptions
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteOptions");
+  }
+};
+
+// The portal class for org.rocksdb.ReadOptions
+class ReadOptionsJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::ReadOptions*,
+                                ReadOptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.ReadOptions
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/ReadOptions");
+  }
+};
+
+// The portal class for org.rocksdb.WriteBatch
+class WriteBatchJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::WriteBatch*,
+                                WriteBatchJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.WriteBatch
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch");
+  }
+
+  /**
+   * Create a new Java org.rocksdb.WriteBatch object
+   *
+   * @param env A pointer to the Java environment
+   * @param wb A pointer to ROCKSDB_NAMESPACE::WriteBatch object
+   *
+   * @return A reference to a Java org.rocksdb.WriteBatch object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env, const WriteBatch* wb) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(J)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    jobject jwb = env->NewObject(jclazz, mid, GET_CPLUSPLUS_POINTER(wb));
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+
+    return jwb;
+  }
+};
+
+// The portal class for org.rocksdb.WriteBatch.Handler
+class WriteBatchHandlerJni
+    : public RocksDBNativeClass<
+          const ROCKSDB_NAMESPACE::WriteBatchHandlerJniCallback*,
+          WriteBatchHandlerJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.WriteBatch.Handler
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/WriteBatch$Handler");
+  }
+
+  /**
+   * Get the Java Method: WriteBatch.Handler#put
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getPutCfMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jclazz, "put", "(I[B[B)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+
+  /**
+   * Get the Java Method: WriteBatch.Handler#put
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getPutMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred 
accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "put", "([B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#merge + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getMergeCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "merge", "(I[B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#merge + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getMergeMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "merge", "([B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#delete + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getDeleteCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "delete", "(I[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#delete + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getDeleteMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "delete", "([B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#singleDelete + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getSingleDeleteCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "(I[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#singleDelete + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getSingleDeleteMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "singleDelete", "([B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#deleteRange + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getDeleteRangeCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + 
if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "(I[B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#deleteRange + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getDeleteRangeMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "deleteRange", "([B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#logData + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getLogDataMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "logData", "([B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#putBlobIndex + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getPutBlobIndexCfMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "putBlobIndex", "(I[B[B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#markBeginPrepare + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getMarkBeginPrepareMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "markBeginPrepare", "()V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#markEndPrepare + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getMarkEndPrepareMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "markEndPrepare", "([B)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#markNoop + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getMarkNoopMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "markNoop", "(Z)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: WriteBatch.Handler#markRollback + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + 
* be retrieved
+   */
+  static jmethodID getMarkRollbackMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jclazz, "markRollback", "([B)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+
+  /**
+   * Get the Java Method: WriteBatch.Handler#markCommit
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getMarkCommitMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jclazz, "markCommit", "([B)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+
+  /**
+   * Get the Java Method: WriteBatch.Handler#markCommitWithTimestamp
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getMarkCommitWithTimestampMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid =
+        env->GetMethodID(jclazz, "markCommitWithTimestamp", "([B[B)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+
+  /**
+   * Get the Java Method: WriteBatch.Handler#shouldContinue
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getContinueMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jclazz, "shouldContinue", "()Z");
+    assert(mid != nullptr);
+    return mid;
+  }
+};
+
+class WriteBatchSavePointJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.WriteBatch.SavePoint
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/WriteBatch$SavePoint");
+  }
+
+  /**
+   * Get the Java Method: WriteBatch.SavePoint constructor
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getConstructorMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJJ)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+
+  /**
+   * Create a new Java org.rocksdb.WriteBatch.SavePoint object
+   *
+   * @param env A pointer to the Java environment
+   * @param save_point A reference to a ROCKSDB_NAMESPACE::WriteBatch::SavePoint
+   *     object
+   *
+   * @return A reference to a Java org.rocksdb.WriteBatch.SavePoint object, or
+   *     nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env, const SavePoint& save_point) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = getConstructorMethodId(env);
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    jobject jsave_point =
+        env->NewObject(jclazz, mid, static_cast<jlong>(save_point.size),
+                       static_cast<jlong>(save_point.count),
+                       static_cast<jlong>(save_point.content_flags));
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+
+    return jsave_point;
+  }
+};
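+
+/*
+ * Illustrative usage (an editor's sketch, not part of the original sources):
+ * the WriteBatch.Handler method ids above are looked up once (note the
+ * function-local `static`) and then invoked from the native handler
+ * callback roughly like this; `m_jcallback_obj` stands for the callback's
+ * global reference to the Java Handler object (name assumed):
+ *
+ *   jmethodID mid =
+ *       ROCKSDB_NAMESPACE::WriteBatchHandlerJni::getLogDataMethodId(env);
+ *   env->CallVoidMethod(m_jcallback_obj, mid, jblob);
+ *   if (env->ExceptionCheck()) {
+ *     // a Java exception was raised by the Handler
+ *   }
+ */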
+
+// The portal class for org.rocksdb.WriteBatchWithIndex
+class WriteBatchWithIndexJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::WriteBatchWithIndex*,
+                                WriteBatchWithIndexJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.WriteBatchWithIndex
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env,
+                                         "org/rocksdb/WriteBatchWithIndex");
+  }
+};
+
+// The portal class for org.rocksdb.HistogramData
+class HistogramDataJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.HistogramData
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/HistogramData");
+  }
+
+  /**
+   * Get the Java Method: HistogramData constructor
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getConstructorMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jclazz, "<init>", "(DDDDDDJJD)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+};
+
+// The portal class for org.rocksdb.BackupEngineOptions
+class BackupEngineOptionsJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::BackupEngineOptions*,
+                                BackupEngineOptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.BackupEngineOptions
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env,
+                                         "org/rocksdb/BackupEngineOptions");
+  }
+};
+
+// The portal class for org.rocksdb.BackupEngine
+class BackupEngineJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::BackupEngine*,
+                                BackupEngineJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.BackupEngine
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/BackupEngine");
+  }
+};
+
+// The portal class for org.rocksdb.RocksIterator
+class IteratorJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::Iterator*, IteratorJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.RocksIterator
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksIterator");
+  }
+};
+
+// The portal class for org.rocksdb.Filter
+class FilterJni
+    : public RocksDBNativeClass<
+          std::shared_ptr<ROCKSDB_NAMESPACE::FilterPolicy>*, FilterJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.Filter
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Filter");
+  }
+};
+
+// The portal class for org.rocksdb.ColumnFamilyHandle
+class ColumnFamilyHandleJni
+    : public RocksDBNativeClass<const ROCKSDB_NAMESPACE::ColumnFamilyHandle*,
+                                ColumnFamilyHandleJni> {
+ public:
+  static jobject fromCppColumnFamilyHandle(
+      JNIEnv* env, const ROCKSDB_NAMESPACE::ColumnFamilyHandle* info) {
+    jclass jclazz = getJClass(env);
+    assert(jclazz != nullptr);
+    static jmethodID ctor = getConstructorMethodId(env, jclazz);
+    assert(ctor != nullptr);
+    return env->NewObject(jclazz, ctor, GET_CPLUSPLUS_POINTER(info));
+  }
+
+  static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) {
+    return env->GetMethodID(clazz, "<init>", "(J)V");
+  }
+
+  /**
+   * Get the Java Class org.rocksdb.ColumnFamilyHandle
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/ColumnFamilyHandle");
+  }
+};
+
+// The portal class for org.rocksdb.FlushOptions
+class FlushOptionsJni
+    : public RocksDBNativeClass<ROCKSDB_NAMESPACE::FlushOptions*,
+                                FlushOptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.FlushOptions
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/FlushOptions");
+  }
+};
+
+// The portal class for org.rocksdb.ComparatorOptions
+class ComparatorOptionsJni
+    : public RocksDBNativeClass<
+          ROCKSDB_NAMESPACE::ComparatorJniCallbackOptions*,
+          ComparatorOptionsJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.ComparatorOptions
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/ComparatorOptions");
+  }
+};
+
+// The portal class for org.rocksdb.AbstractCompactionFilterFactory
+class AbstractCompactionFilterFactoryJni
+    : public RocksDBNativeClass<
+          const ROCKSDB_NAMESPACE::CompactionFilterFactoryJniCallback*,
+          AbstractCompactionFilterFactoryJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.AbstractCompactionFilterFactory
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(
+        env, 
"org/rocksdb/AbstractCompactionFilterFactory"); + } + + /** + * Get the Java Method: AbstractCompactionFilterFactory#name + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getNameMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "name", "()Ljava/lang/String;"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractCompactionFilterFactory#createCompactionFilter + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getCreateCompactionFilterMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "createCompactionFilter", "(ZZ)J"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.AbstractTransactionNotifier +class AbstractTransactionNotifierJni + : public RocksDBNativeClass< + const ROCKSDB_NAMESPACE::TransactionNotifierJniCallback*, + AbstractTransactionNotifierJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass( + env, "org/rocksdb/AbstractTransactionNotifier"); + } + + // Get the java method `snapshotCreated` + // of org.rocksdb.AbstractTransactionNotifier. + static jmethodID getSnapshotCreatedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "snapshotCreated", "(J)V"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.AbstractComparatorJniBridge +class AbstractComparatorJniBridge : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.AbstractComparatorJniBridge + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/AbstractComparatorJniBridge"); + } + + /** + * Get the Java Method: Comparator#compareInternal + * + * @param env A pointer to the Java environment + * @param jclazz the AbstractComparatorJniBridge class + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getCompareInternalMethodId(JNIEnv* env, jclass jclazz) { + static jmethodID mid = + env->GetStaticMethodID(jclazz, "compareInternal", + "(Lorg/rocksdb/AbstractComparator;Ljava/nio/" + "ByteBuffer;ILjava/nio/ByteBuffer;I)I"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: Comparator#findShortestSeparatorInternal + * + * @param env A pointer to the Java environment + * @param jclazz the AbstractComparatorJniBridge class + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getFindShortestSeparatorInternalMethodId(JNIEnv* env, + jclass jclazz) { + static jmethodID mid = + env->GetStaticMethodID(jclazz, "findShortestSeparatorInternal", + 
"(Lorg/rocksdb/AbstractComparator;Ljava/nio/" + "ByteBuffer;ILjava/nio/ByteBuffer;I)I"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: Comparator#findShortSuccessorInternal + * + * @param env A pointer to the Java environment + * @param jclazz the AbstractComparatorJniBridge class + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getFindShortSuccessorInternalMethodId(JNIEnv* env, + jclass jclazz) { + static jmethodID mid = env->GetStaticMethodID( + jclazz, "findShortSuccessorInternal", + "(Lorg/rocksdb/AbstractComparator;Ljava/nio/ByteBuffer;I)I"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.AbstractComparator +class AbstractComparatorJni + : public RocksDBNativeClass { + public: + /** + * Get the Java Class org.rocksdb.AbstractComparator + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractComparator"); + } + + /** + * Get the Java Method: Comparator#name + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getNameMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "name", "()Ljava/lang/String;"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.AbstractSlice +class AbstractSliceJni + : public NativeRocksMutableObject { + public: + /** + * Get the Java Class org.rocksdb.AbstractSlice + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractSlice"); + } +}; + +// The portal class for org.rocksdb.Slice +class SliceJni + : public NativeRocksMutableObject { + public: + /** + * Get the Java Class org.rocksdb.Slice + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/Slice"); + } + + /** + * Constructs a Slice object + * + * @param env A pointer to the Java environment + * + * @return A reference to a Java Slice object, or a nullptr if an + * exception occurs + */ + static jobject construct0(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "", "()V"); + if (mid == nullptr) { + // exception occurred accessing method + return nullptr; + } + + jobject jslice = env->NewObject(jclazz, mid); + if (env->ExceptionCheck()) { + return nullptr; + } + + return jslice; + } +}; + +// The portal class for org.rocksdb.DirectSlice 
+
+// The portal class for org.rocksdb.DirectSlice
+class DirectSliceJni
+    : public NativeRocksMutableObject<const ROCKSDB_NAMESPACE::Slice*,
+                                      DirectSliceJni> {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.DirectSlice
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/DirectSlice");
+  }
+
+  /**
+   * Constructs a DirectSlice object
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return A reference to a Java DirectSlice object, or a nullptr if an
+   *     exception occurs
+   */
+  static jobject construct0(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(jclazz, "<init>", "()V");
+    if (mid == nullptr) {
+      // exception occurred accessing method
+      return nullptr;
+    }
+
+    jobject jdirect_slice = env->NewObject(jclazz, mid);
+    if (env->ExceptionCheck()) {
+      return nullptr;
+    }
+
+    return jdirect_slice;
+  }
+};
+
+// The portal class for org.rocksdb.BackupInfo
+class BackupInfoJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.BackupInfo
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(env, "org/rocksdb/BackupInfo");
+  }
+
+  /**
+   * Constructs a BackupInfo object
+   *
+   * @param env A pointer to the Java environment
+   * @param backup_id id of the backup
+   * @param timestamp timestamp of the backup
+   * @param size size of the backup
+   * @param number_files number of files related to the backup
+   * @param app_metadata application specific metadata
+   *
+   * @return A reference to a Java BackupInfo object, or a nullptr if an
+   *     exception occurs
+   */
+  static jobject construct0(JNIEnv* env, uint32_t backup_id, int64_t timestamp,
+                            uint64_t size, uint32_t number_files,
+                            const std::string& app_metadata) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid =
+        env->GetMethodID(jclazz, "<init>", "(IJJILjava/lang/String;)V");
+    if (mid == nullptr) {
+      // exception occurred accessing method
+      return nullptr;
+    }
+
+    jstring japp_metadata = nullptr;
+    if (!app_metadata.empty()) {
+      japp_metadata = env->NewStringUTF(app_metadata.c_str());
+      if (japp_metadata == nullptr) {
+        // exception occurred creating java string
+        return nullptr;
+      }
+    }
+
+    jobject jbackup_info = env->NewObject(jclazz, mid, backup_id, timestamp,
+                                          size, number_files, japp_metadata);
+    if (env->ExceptionCheck()) {
+      env->DeleteLocalRef(japp_metadata);
+      return nullptr;
+    }
+
+    return jbackup_info;
+  }
+};
+
+class BackupInfoListJni {
+ public:
+  /**
+   * Converts a C++ std::vector<BackupInfo> object to
+   * a Java ArrayList<org.rocksdb.BackupInfo> object
+   *
+   * @param env A pointer to the Java environment
+   * @param backup_infos A vector of BackupInfo
+   *
+   * @return Either a reference to a Java ArrayList object, or a nullptr
+   *     if an exception occurs
+   */
+  static jobject getBackupInfo(JNIEnv* env,
+                               std::vector<BackupInfo> backup_infos) {
+    jclass jarray_list_clazz =
+        ROCKSDB_NAMESPACE::ListJni::getArrayListClass(env);
+    if (jarray_list_clazz == nullptr) {
+      // 
exception occurred accessing class + return nullptr; + } + + jmethodID cstr_mid = + ROCKSDB_NAMESPACE::ListJni::getArrayListConstructorMethodId(env); + if (cstr_mid == nullptr) { + // exception occurred accessing method + return nullptr; + } + + jmethodID add_mid = ROCKSDB_NAMESPACE::ListJni::getListAddMethodId(env); + if (add_mid == nullptr) { + // exception occurred accessing method + return nullptr; + } + + // create java list + jobject jbackup_info_handle_list = + env->NewObject(jarray_list_clazz, cstr_mid, backup_infos.size()); + if (env->ExceptionCheck()) { + // exception occurred constructing object + return nullptr; + } + + // insert in java list + auto end = backup_infos.end(); + for (auto it = backup_infos.begin(); it != end; ++it) { + auto backup_info = *it; + + jobject obj = ROCKSDB_NAMESPACE::BackupInfoJni::construct0( + env, backup_info.backup_id, backup_info.timestamp, backup_info.size, + backup_info.number_files, backup_info.app_metadata); + if (env->ExceptionCheck()) { + // exception occurred constructing object + if (obj != nullptr) { + env->DeleteLocalRef(obj); + } + if (jbackup_info_handle_list != nullptr) { + env->DeleteLocalRef(jbackup_info_handle_list); + } + return nullptr; + } + + jboolean rs = + env->CallBooleanMethod(jbackup_info_handle_list, add_mid, obj); + if (env->ExceptionCheck() || rs == JNI_FALSE) { + // exception occurred calling method, or could not add + if (obj != nullptr) { + env->DeleteLocalRef(obj); + } + if (jbackup_info_handle_list != nullptr) { + env->DeleteLocalRef(jbackup_info_handle_list); + } + return nullptr; + } + } + + return jbackup_info_handle_list; + } +}; + +// The portal class for org.rocksdb.WBWIRocksIterator +class WBWIRocksIteratorJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.WBWIRocksIterator + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator"); + } + + /** + * Get the Java Field: WBWIRocksIterator#entry + * + * @param env A pointer to the Java environment + * + * @return The Java Field ID or nullptr if the class or field id could not + * be retrieved + */ + static jfieldID getWriteEntryField(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jfieldID fid = env->GetFieldID( + jclazz, "entry", "Lorg/rocksdb/WBWIRocksIterator$WriteEntry;"); + assert(fid != nullptr); + return fid; + } + + /** + * Gets the value of the WBWIRocksIterator#entry + * + * @param env A pointer to the Java environment + * @param jwbwi_rocks_iterator A reference to a WBWIIterator + * + * @return A reference to a Java WBWIRocksIterator.WriteEntry object, or + * a nullptr if an exception occurs + */ + static jobject getWriteEntry(JNIEnv* env, jobject jwbwi_rocks_iterator) { + assert(jwbwi_rocks_iterator != nullptr); + + jfieldID jwrite_entry_field = getWriteEntryField(env); + if (jwrite_entry_field == nullptr) { + // exception occurred accessing the field + return nullptr; + } + + jobject jwe = env->GetObjectField(jwbwi_rocks_iterator, jwrite_entry_field); + assert(jwe != nullptr); + return jwe; + } +}; + +// The portal class for org.rocksdb.WBWIRocksIterator.WriteType +class WriteTypeJni : public JavaClass { + public: + /** + * 
Get the PUT enum field value of WBWIRocksIterator.WriteType + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject PUT(JNIEnv* env) { return getEnum(env, "PUT"); } + + /** + * Get the MERGE enum field value of WBWIRocksIterator.WriteType + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject MERGE(JNIEnv* env) { return getEnum(env, "MERGE"); } + + /** + * Get the DELETE enum field value of WBWIRocksIterator.WriteType + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject DELETE(JNIEnv* env) { return getEnum(env, "DELETE"); } + + /** + * Get the LOG enum field value of WBWIRocksIterator.WriteType + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject LOG(JNIEnv* env) { return getEnum(env, "LOG"); } + + // Returns the equivalent org.rocksdb.WBWIRocksIterator.WriteType for the + // provided C++ ROCKSDB_NAMESPACE::WriteType enum + static jbyte toJavaWriteType(const ROCKSDB_NAMESPACE::WriteType& writeType) { + switch (writeType) { + case ROCKSDB_NAMESPACE::WriteType::kPutRecord: + return 0x0; + case ROCKSDB_NAMESPACE::WriteType::kMergeRecord: + return 0x1; + case ROCKSDB_NAMESPACE::WriteType::kDeleteRecord: + return 0x2; + case ROCKSDB_NAMESPACE::WriteType::kSingleDeleteRecord: + return 0x3; + case ROCKSDB_NAMESPACE::WriteType::kDeleteRangeRecord: + return 0x4; + case ROCKSDB_NAMESPACE::WriteType::kLogDataRecord: + return 0x5; + case ROCKSDB_NAMESPACE::WriteType::kXIDRecord: + return 0x6; + default: + return 0x7F; // undefined + } + } + + private: + /** + * Get the Java Class org.rocksdb.WBWIRocksIterator.WriteType + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/WBWIRocksIterator$WriteType"); + } + + /** + * Get an enum field of org.rocksdb.WBWIRocksIterator.WriteType + * + * @param env A pointer to the Java environment + * @param name The name of the enum field + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject getEnum(JNIEnv* env, const char name[]) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jfieldID jfid = env->GetStaticFieldID( + jclazz, name, "Lorg/rocksdb/WBWIRocksIterator$WriteType;"); + if (env->ExceptionCheck()) { + // exception occurred while getting field + return nullptr; + } else if (jfid == nullptr) { + return nullptr; + } + + jobject jwrite_type = env->GetStaticObjectField(jclazz, jfid); + assert(jwrite_type != nullptr); + return jwrite_type; + } +}; + +// The portal class for org.rocksdb.WBWIRocksIterator.WriteEntry +class WriteEntryJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.WBWIRocksIterator.WriteEntry + * + * @param env A pointer to the Java 
environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, + "org/rocksdb/WBWIRocksIterator$WriteEntry"); + } +}; + +// The portal class for org.rocksdb.InfoLogLevel +class InfoLogLevelJni : public JavaClass { + public: + /** + * Get the DEBUG_LEVEL enum field value of InfoLogLevel + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject DEBUG_LEVEL(JNIEnv* env) { + return getEnum(env, "DEBUG_LEVEL"); + } + + /** + * Get the INFO_LEVEL enum field value of InfoLogLevel + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject INFO_LEVEL(JNIEnv* env) { return getEnum(env, "INFO_LEVEL"); } + + /** + * Get the WARN_LEVEL enum field value of InfoLogLevel + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject WARN_LEVEL(JNIEnv* env) { return getEnum(env, "WARN_LEVEL"); } + + /** + * Get the ERROR_LEVEL enum field value of InfoLogLevel + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject ERROR_LEVEL(JNIEnv* env) { + return getEnum(env, "ERROR_LEVEL"); + } + + /** + * Get the FATAL_LEVEL enum field value of InfoLogLevel + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject FATAL_LEVEL(JNIEnv* env) { + return getEnum(env, "FATAL_LEVEL"); + } + + /** + * Get the HEADER_LEVEL enum field value of InfoLogLevel + * + * @param env A pointer to the Java environment + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject HEADER_LEVEL(JNIEnv* env) { + return getEnum(env, "HEADER_LEVEL"); + } + + private: + /** + * Get the Java Class org.rocksdb.InfoLogLevel + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/InfoLogLevel"); + } + + /** + * Get an enum field of org.rocksdb.InfoLogLevel + * + * @param env A pointer to the Java environment + * @param name The name of the enum field + * + * @return A reference to the enum field value or a nullptr if + * the enum field value could not be retrieved + */ + static jobject getEnum(JNIEnv* env, const char name[]) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jfieldID jfid = + env->GetStaticFieldID(jclazz, name, "Lorg/rocksdb/InfoLogLevel;"); + if (env->ExceptionCheck()) { + // exception occurred while getting field + return nullptr; + } else if (jfid == nullptr) { + return nullptr; + } + + jobject jinfo_log_level = 
env->GetStaticObjectField(jclazz, jfid);
+    assert(jinfo_log_level != nullptr);
+    return jinfo_log_level;
+  }
+};
+
+// The portal class for org.rocksdb.Logger
+class LoggerJni
+    : public RocksDBNativeClass<
+          std::shared_ptr<ROCKSDB_NAMESPACE::LoggerJniCallback>*, LoggerJni> {
+ public:
+  /**
+   * Get the Java Class org/rocksdb/Logger
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return RocksDBNativeClass::getJClass(env, "org/rocksdb/Logger");
+  }
+
+  /**
+   * Get the Java Method: Logger#log
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Method ID or nullptr if the class or method id could not
+   *     be retrieved
+   */
+  static jmethodID getLogMethodId(JNIEnv* env) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    static jmethodID mid = env->GetMethodID(
+        jclazz, "log", "(Lorg/rocksdb/InfoLogLevel;Ljava/lang/String;)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+};
+
+// The portal class for org.rocksdb.TransactionLogIterator.BatchResult
+class BatchResultJni : public JavaClass {
+ public:
+  /**
+   * Get the Java Class org.rocksdb.TransactionLogIterator.BatchResult
+   *
+   * @param env A pointer to the Java environment
+   *
+   * @return The Java Class or nullptr if one of the
+   *     ClassFormatError, ClassCircularityError, NoClassDefFoundError,
+   *     OutOfMemoryError or ExceptionInInitializerError exceptions is thrown
+   */
+  static jclass getJClass(JNIEnv* env) {
+    return JavaClass::getJClass(
+        env, "org/rocksdb/TransactionLogIterator$BatchResult");
+  }
+
+  /**
+   * Create a new Java org.rocksdb.TransactionLogIterator.BatchResult object
+   * with the same properties as the provided C++ ROCKSDB_NAMESPACE::BatchResult
+   * object
+   *
+   * @param env A pointer to the Java environment
+   * @param batch_result The ROCKSDB_NAMESPACE::BatchResult object
+   *
+   * @return A reference to a Java
+   *     org.rocksdb.TransactionLogIterator.BatchResult object,
+   *     or nullptr if an exception occurs
+   */
+  static jobject construct(JNIEnv* env,
+                           ROCKSDB_NAMESPACE::BatchResult& batch_result) {
+    jclass jclazz = getJClass(env);
+    if (jclazz == nullptr) {
+      // exception occurred accessing class
+      return nullptr;
+    }
+
+    jmethodID mid = env->GetMethodID(jclazz, "<init>", "(JJ)V");
+    if (mid == nullptr) {
+      // exception thrown: NoSuchMethodException or OutOfMemoryError
+      return nullptr;
+    }
+
+    jobject jbatch_result = env->NewObject(jclazz, mid, batch_result.sequence,
+                                           batch_result.writeBatchPtr.get());
+    if (jbatch_result == nullptr) {
+      // exception thrown: InstantiationException or OutOfMemoryError
+      return nullptr;
+    }
+
+    batch_result.writeBatchPtr.release();
+    return jbatch_result;
+  }
+};
+
+// The portal class for org.rocksdb.BottommostLevelCompaction
+class BottommostLevelCompactionJni {
+ public:
+  // Returns the equivalent org.rocksdb.BottommostLevelCompaction for the
+  // provided C++ ROCKSDB_NAMESPACE::BottommostLevelCompaction enum
+  static jint toJavaBottommostLevelCompaction(
+      const ROCKSDB_NAMESPACE::BottommostLevelCompaction&
+          bottommost_level_compaction) {
+    switch (bottommost_level_compaction) {
+      case ROCKSDB_NAMESPACE::BottommostLevelCompaction::kSkip:
+        return 0x0;
+      case ROCKSDB_NAMESPACE::BottommostLevelCompaction::
+          kIfHaveCompactionFilter:
+        return 0x1;
+      case 
ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForce: + return 0x2; + case ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForceOptimized: + return 0x3; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::BottommostLevelCompaction + // enum for the provided Java org.rocksdb.BottommostLevelCompaction + static ROCKSDB_NAMESPACE::BottommostLevelCompaction + toCppBottommostLevelCompaction(jint bottommost_level_compaction) { + switch (bottommost_level_compaction) { + case 0x0: + return ROCKSDB_NAMESPACE::BottommostLevelCompaction::kSkip; + case 0x1: + return ROCKSDB_NAMESPACE::BottommostLevelCompaction:: + kIfHaveCompactionFilter; + case 0x2: + return ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForce; + case 0x3: + return ROCKSDB_NAMESPACE::BottommostLevelCompaction::kForceOptimized; + default: + // undefined/default + return ROCKSDB_NAMESPACE::BottommostLevelCompaction:: + kIfHaveCompactionFilter; + } + } +}; + +// The portal class for org.rocksdb.CompactionStopStyle +class CompactionStopStyleJni { + public: + // Returns the equivalent org.rocksdb.CompactionStopStyle for the provided + // C++ ROCKSDB_NAMESPACE::CompactionStopStyle enum + static jbyte toJavaCompactionStopStyle( + const ROCKSDB_NAMESPACE::CompactionStopStyle& compaction_stop_style) { + switch (compaction_stop_style) { + case ROCKSDB_NAMESPACE::CompactionStopStyle:: + kCompactionStopStyleSimilarSize: + return 0x0; + case ROCKSDB_NAMESPACE::CompactionStopStyle:: + kCompactionStopStyleTotalSize: + return 0x1; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionStopStyle enum for + // the provided Java org.rocksdb.CompactionStopStyle + static ROCKSDB_NAMESPACE::CompactionStopStyle toCppCompactionStopStyle( + jbyte jcompaction_stop_style) { + switch (jcompaction_stop_style) { + case 0x0: + return ROCKSDB_NAMESPACE::CompactionStopStyle:: + kCompactionStopStyleSimilarSize; + case 0x1: + return ROCKSDB_NAMESPACE::CompactionStopStyle:: + kCompactionStopStyleTotalSize; + default: + // undefined/default + return ROCKSDB_NAMESPACE::CompactionStopStyle:: + kCompactionStopStyleSimilarSize; + } + } +}; + +// The portal class for org.rocksdb.CompressionType +class CompressionTypeJni { + public: + // Returns the equivalent org.rocksdb.CompressionType for the provided + // C++ ROCKSDB_NAMESPACE::CompressionType enum + static jbyte toJavaCompressionType( + const ROCKSDB_NAMESPACE::CompressionType& compression_type) { + switch (compression_type) { + case ROCKSDB_NAMESPACE::CompressionType::kNoCompression: + return 0x0; + case ROCKSDB_NAMESPACE::CompressionType::kSnappyCompression: + return 0x1; + case ROCKSDB_NAMESPACE::CompressionType::kZlibCompression: + return 0x2; + case ROCKSDB_NAMESPACE::CompressionType::kBZip2Compression: + return 0x3; + case ROCKSDB_NAMESPACE::CompressionType::kLZ4Compression: + return 0x4; + case ROCKSDB_NAMESPACE::CompressionType::kLZ4HCCompression: + return 0x5; + case ROCKSDB_NAMESPACE::CompressionType::kXpressCompression: + return 0x6; + case ROCKSDB_NAMESPACE::CompressionType::kZSTD: + return 0x7; + case ROCKSDB_NAMESPACE::CompressionType::kDisableCompressionOption: + default: + return 0x7F; + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompressionType enum for the + // provided Java org.rocksdb.CompressionType + static ROCKSDB_NAMESPACE::CompressionType toCppCompressionType( + jbyte jcompression_type) { + switch (jcompression_type) { + case 0x0: + return 
ROCKSDB_NAMESPACE::CompressionType::kNoCompression; + case 0x1: + return ROCKSDB_NAMESPACE::CompressionType::kSnappyCompression; + case 0x2: + return ROCKSDB_NAMESPACE::CompressionType::kZlibCompression; + case 0x3: + return ROCKSDB_NAMESPACE::CompressionType::kBZip2Compression; + case 0x4: + return ROCKSDB_NAMESPACE::CompressionType::kLZ4Compression; + case 0x5: + return ROCKSDB_NAMESPACE::CompressionType::kLZ4HCCompression; + case 0x6: + return ROCKSDB_NAMESPACE::CompressionType::kXpressCompression; + case 0x7: + return ROCKSDB_NAMESPACE::CompressionType::kZSTD; + case 0x7F: + default: + return ROCKSDB_NAMESPACE::CompressionType::kDisableCompressionOption; + } + } +}; + +// The portal class for org.rocksdb.CompactionPriority +class CompactionPriorityJni { + public: + // Returns the equivalent org.rocksdb.CompactionPriority for the provided + // C++ ROCKSDB_NAMESPACE::CompactionPri enum + static jbyte toJavaCompactionPriority( + const ROCKSDB_NAMESPACE::CompactionPri& compaction_priority) { + switch (compaction_priority) { + case ROCKSDB_NAMESPACE::CompactionPri::kByCompensatedSize: + return 0x0; + case ROCKSDB_NAMESPACE::CompactionPri::kOldestLargestSeqFirst: + return 0x1; + case ROCKSDB_NAMESPACE::CompactionPri::kOldestSmallestSeqFirst: + return 0x2; + case ROCKSDB_NAMESPACE::CompactionPri::kMinOverlappingRatio: + return 0x3; + case ROCKSDB_NAMESPACE::CompactionPri::kRoundRobin: + return 0x4; + default: + return 0x0; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionPri enum for the + // provided Java org.rocksdb.CompactionPriority + static ROCKSDB_NAMESPACE::CompactionPri toCppCompactionPriority( + jbyte jcompaction_priority) { + switch (jcompaction_priority) { + case 0x0: + return ROCKSDB_NAMESPACE::CompactionPri::kByCompensatedSize; + case 0x1: + return ROCKSDB_NAMESPACE::CompactionPri::kOldestLargestSeqFirst; + case 0x2: + return ROCKSDB_NAMESPACE::CompactionPri::kOldestSmallestSeqFirst; + case 0x3: + return ROCKSDB_NAMESPACE::CompactionPri::kMinOverlappingRatio; + case 0x4: + return ROCKSDB_NAMESPACE::CompactionPri::kRoundRobin; + default: + // undefined/default + return ROCKSDB_NAMESPACE::CompactionPri::kByCompensatedSize; + } + } +}; + +// The portal class for org.rocksdb.AccessHint +class AccessHintJni { + public: + // Returns the equivalent org.rocksdb.AccessHint for the provided + // C++ ROCKSDB_NAMESPACE::DBOptions::AccessHint enum + static jbyte toJavaAccessHint( + const ROCKSDB_NAMESPACE::DBOptions::AccessHint& access_hint) { + switch (access_hint) { + case ROCKSDB_NAMESPACE::DBOptions::AccessHint::NONE: + return 0x0; + case ROCKSDB_NAMESPACE::DBOptions::AccessHint::NORMAL: + return 0x1; + case ROCKSDB_NAMESPACE::DBOptions::AccessHint::SEQUENTIAL: + return 0x2; + case ROCKSDB_NAMESPACE::DBOptions::AccessHint::WILLNEED: + return 0x3; + default: + // undefined/default + return 0x1; + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::DBOptions::AccessHint enum + // for the provided Java org.rocksdb.AccessHint + static ROCKSDB_NAMESPACE::DBOptions::AccessHint toCppAccessHint( + jbyte jaccess_hint) { + switch (jaccess_hint) { + case 0x0: + return ROCKSDB_NAMESPACE::DBOptions::AccessHint::NONE; + case 0x1: + return ROCKSDB_NAMESPACE::DBOptions::AccessHint::NORMAL; + case 0x2: + return ROCKSDB_NAMESPACE::DBOptions::AccessHint::SEQUENTIAL; + case 0x3: + return ROCKSDB_NAMESPACE::DBOptions::AccessHint::WILLNEED; + default: + // undefined/default + return ROCKSDB_NAMESPACE::DBOptions::AccessHint::NORMAL; + } + } +}; + +// The portal class 
for org.rocksdb.WALRecoveryMode +class WALRecoveryModeJni { + public: + // Returns the equivalent org.rocksdb.WALRecoveryMode for the provided + // C++ ROCKSDB_NAMESPACE::WALRecoveryMode enum + static jbyte toJavaWALRecoveryMode( + const ROCKSDB_NAMESPACE::WALRecoveryMode& wal_recovery_mode) { + switch (wal_recovery_mode) { + case ROCKSDB_NAMESPACE::WALRecoveryMode::kTolerateCorruptedTailRecords: + return 0x0; + case ROCKSDB_NAMESPACE::WALRecoveryMode::kAbsoluteConsistency: + return 0x1; + case ROCKSDB_NAMESPACE::WALRecoveryMode::kPointInTimeRecovery: + return 0x2; + case ROCKSDB_NAMESPACE::WALRecoveryMode::kSkipAnyCorruptedRecords: + return 0x3; + default: + // undefined/default + return 0x2; + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::WALRecoveryMode enum for the + // provided Java org.rocksdb.WALRecoveryMode + static ROCKSDB_NAMESPACE::WALRecoveryMode toCppWALRecoveryMode( + jbyte jwal_recovery_mode) { + switch (jwal_recovery_mode) { + case 0x0: + return ROCKSDB_NAMESPACE::WALRecoveryMode:: + kTolerateCorruptedTailRecords; + case 0x1: + return ROCKSDB_NAMESPACE::WALRecoveryMode::kAbsoluteConsistency; + case 0x2: + return ROCKSDB_NAMESPACE::WALRecoveryMode::kPointInTimeRecovery; + case 0x3: + return ROCKSDB_NAMESPACE::WALRecoveryMode::kSkipAnyCorruptedRecords; + default: + // undefined/default + return ROCKSDB_NAMESPACE::WALRecoveryMode::kPointInTimeRecovery; + } + } +}; + +// The portal class for org.rocksdb.TickerType +class TickerTypeJni { + public: + // Returns the equivalent org.rocksdb.TickerType for the provided + // C++ ROCKSDB_NAMESPACE::Tickers enum + static jbyte toJavaTickerType(const ROCKSDB_NAMESPACE::Tickers& tickers) { + switch (tickers) { + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_MISS: + return 0x0; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_HIT: + return 0x1; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD: + return 0x2; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD_FAILURES: + return 0x3; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_MISS: + return 0x4; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_HIT: + return 0x5; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_ADD: + return 0x6; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_INSERT: + return 0x7; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_EVICT: + return 0x8; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_MISS: + return 0x9; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_HIT: + return 0xA; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_ADD: + return 0xB; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_INSERT: + return 0xC; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_EVICT: + return 0xD; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_MISS: + return 0xE; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_HIT: + return 0xF; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_ADD: + return 0x10; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_BYTES_INSERT: + return 0x11; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_READ: + return 0x12; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_WRITE: + return 0x13; + case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_USEFUL: + return 0x14; + case ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_HIT: + return 0x15; + case ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_MISS: + return 0x16; + case ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_HIT: + return 0x17; + case ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_MISS: + return 0x18; + case 
ROCKSDB_NAMESPACE::Tickers::MEMTABLE_HIT: + return 0x19; + case ROCKSDB_NAMESPACE::Tickers::MEMTABLE_MISS: + return 0x1A; + case ROCKSDB_NAMESPACE::Tickers::GET_HIT_L0: + return 0x1B; + case ROCKSDB_NAMESPACE::Tickers::GET_HIT_L1: + return 0x1C; + case ROCKSDB_NAMESPACE::Tickers::GET_HIT_L2_AND_UP: + return 0x1D; + case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY: + return 0x1E; + case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_OBSOLETE: + return 0x1F; + case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_RANGE_DEL: + return 0x20; + case ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_USER: + return 0x21; + case ROCKSDB_NAMESPACE::Tickers::COMPACTION_RANGE_DEL_DROP_OBSOLETE: + return 0x22; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_WRITTEN: + return 0x23; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_READ: + return 0x24; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_UPDATED: + return 0x25; + case ROCKSDB_NAMESPACE::Tickers::BYTES_WRITTEN: + return 0x26; + case ROCKSDB_NAMESPACE::Tickers::BYTES_READ: + return 0x27; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK: + return 0x28; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT: + return 0x29; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV: + return 0x2A; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK_FOUND: + return 0x2B; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT_FOUND: + return 0x2C; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV_FOUND: + return 0x2D; + case ROCKSDB_NAMESPACE::Tickers::ITER_BYTES_READ: + return 0x2E; + case ROCKSDB_NAMESPACE::Tickers::NO_FILE_CLOSES: + return 0x2F; + case ROCKSDB_NAMESPACE::Tickers::NO_FILE_OPENS: + return 0x30; + case ROCKSDB_NAMESPACE::Tickers::NO_FILE_ERRORS: + return 0x31; + case ROCKSDB_NAMESPACE::Tickers::STALL_L0_SLOWDOWN_MICROS: + return 0x32; + case ROCKSDB_NAMESPACE::Tickers::STALL_MEMTABLE_COMPACTION_MICROS: + return 0x33; + case ROCKSDB_NAMESPACE::Tickers::STALL_L0_NUM_FILES_MICROS: + return 0x34; + case ROCKSDB_NAMESPACE::Tickers::STALL_MICROS: + return 0x35; + case ROCKSDB_NAMESPACE::Tickers::DB_MUTEX_WAIT_MICROS: + return 0x36; + case ROCKSDB_NAMESPACE::Tickers::RATE_LIMIT_DELAY_MILLIS: + return 0x37; + case ROCKSDB_NAMESPACE::Tickers::NO_ITERATORS: + return 0x38; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_CALLS: + return 0x39; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_READ: + return 0x3A; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_BYTES_READ: + return 0x3B; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_FILTERED_DELETES: + return 0x3C; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_MERGE_FAILURES: + return 0x3D; + case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_CHECKED: + return 0x3E; + case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_USEFUL: + return 0x3F; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION: + return 0x40; + case ROCKSDB_NAMESPACE::Tickers::GET_UPDATES_SINCE_CALLS: + return 0x41; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_MISS: + return 0x42; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_HIT: + return 0x43; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD: + return 0x44; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD_FAILURES: + return 0x45; + case ROCKSDB_NAMESPACE::Tickers::WAL_FILE_SYNCED: + return 0x46; + case ROCKSDB_NAMESPACE::Tickers::WAL_FILE_BYTES: + return 0x47; + case ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_SELF: + return 0x48; + case ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_OTHER: + return 0x49; + case 
ROCKSDB_NAMESPACE::Tickers::WRITE_TIMEDOUT: + return 0x4A; + case ROCKSDB_NAMESPACE::Tickers::WRITE_WITH_WAL: + return 0x4B; + case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES: + return 0x4C; + case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES: + return 0x4D; + case ROCKSDB_NAMESPACE::Tickers::FLUSH_WRITE_BYTES: + return 0x4E; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_DIRECT_LOAD_TABLE_PROPERTIES: + return 0x4F; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_ACQUIRES: + return 0x50; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_RELEASES: + return 0x51; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_CLEANUPS: + return 0x52; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_COMPRESSED: + return 0x53; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_DECOMPRESSED: + return 0x54; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_NOT_COMPRESSED: + return 0x55; + case ROCKSDB_NAMESPACE::Tickers::MERGE_OPERATION_TOTAL_TIME: + return 0x56; + case ROCKSDB_NAMESPACE::Tickers::FILTER_OPERATION_TOTAL_TIME: + return 0x57; + case ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_HIT: + return 0x58; + case ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_MISS: + return 0x59; + case ROCKSDB_NAMESPACE::Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES: + return 0x5A; + case ROCKSDB_NAMESPACE::Tickers::READ_AMP_TOTAL_READ_BYTES: + return 0x5B; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_RATE_LIMITER_DRAINS: + return 0x5C; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_ITER_SKIP: + return 0x5D; + case ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_FOUND: + return 0x5E; + case ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_CREATED: + // -0x01 so we can skip over the already taken 0x5F (TICKER_ENUM_MAX). + return -0x01; + case ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_DELETED: + return 0x60; + case ROCKSDB_NAMESPACE::Tickers::COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE: + return 0x61; + case ROCKSDB_NAMESPACE::Tickers::COMPACTION_CANCELLED: + return 0x62; + case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_POSITIVE: + return 0x63; + case ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_TRUE_POSITIVE: + return 0x64; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PUT: + return 0x65; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_WRITE: + return 0x66; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_GET: + return 0x67; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_MULTIGET: + return 0x68; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_SEEK: + return 0x69; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_NEXT: + return 0x6A; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PREV: + return 0x6B; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_WRITTEN: + return 0x6C; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_READ: + return 0x6D; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_WRITTEN: + return 0x6E; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_READ: + return 0x6F; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED: + return 0x70; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED_TTL: + return 0x71; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB: + return 0x72; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB_TTL: + return 0x73; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_WRITTEN: + return 0x74; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_READ: + return 0x75; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_SYNCED: + return 0x76; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_COUNT: + return 0x77; + case 
ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_SIZE:
+        return 0x78;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_COUNT:
+        return 0x79;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_SIZE:
+        return 0x7A;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_FILES:
+        return 0x7B;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_NEW_FILES:
+        return 0x7C;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_FAILURES:
+        return 0x7D;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_OVERWRITTEN:
+        return 0x7E;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_EXPIRED:
+        return 0x7F;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_RELOCATED:
+        return -0x02;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_OVERWRITTEN:
+        return -0x03;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_EXPIRED:
+        return -0x04;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_RELOCATED:
+        return -0x05;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_FILES_EVICTED:
+        return -0x06;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_KEYS_EVICTED:
+        return -0x07;
+      case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_BYTES_EVICTED:
+        return -0x08;
+      case ROCKSDB_NAMESPACE::Tickers::TXN_PREPARE_MUTEX_OVERHEAD:
+        return -0x09;
+      case ROCKSDB_NAMESPACE::Tickers::TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD:
+        return -0x0A;
+      case ROCKSDB_NAMESPACE::Tickers::TXN_DUPLICATE_KEY_OVERHEAD:
+        return -0x0B;
+      case ROCKSDB_NAMESPACE::Tickers::TXN_SNAPSHOT_MUTEX_OVERHEAD:
+        return -0x0C;
+      case ROCKSDB_NAMESPACE::Tickers::TXN_GET_TRY_AGAIN:
+        return -0x0D;
+      case ROCKSDB_NAMESPACE::Tickers::FILES_MARKED_TRASH:
+        return -0x0E;
+      case ROCKSDB_NAMESPACE::Tickers::FILES_DELETED_IMMEDIATELY:
+        return -0x0F;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_MARKED:
+        return -0x10;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_PERIODIC:
+        return -0x11;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_TTL:
+        return -0x12;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_MARKED:
+        return -0x13;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_PERIODIC:
+        return -0x14;
+      case ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_TTL:
+        return -0x15;
+      case ROCKSDB_NAMESPACE::Tickers::ERROR_HANDLER_BG_ERROR_COUNT:
+        return -0x16;
+      case ROCKSDB_NAMESPACE::Tickers::ERROR_HANDLER_BG_IO_ERROR_COUNT:
+        return -0x17;
+      case ROCKSDB_NAMESPACE::Tickers::
+          ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT:
+        return -0x18;
+      case ROCKSDB_NAMESPACE::Tickers::ERROR_HANDLER_AUTORESUME_COUNT:
+        return -0x19;
+      case ROCKSDB_NAMESPACE::Tickers::
+          ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT:
+        return -0x1A;
+      case ROCKSDB_NAMESPACE::Tickers::ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT:
+        return -0x1B;
+      case ROCKSDB_NAMESPACE::Tickers::MEMTABLE_PAYLOAD_BYTES_AT_FLUSH:
+        return -0x1C;
+      case ROCKSDB_NAMESPACE::Tickers::MEMTABLE_GARBAGE_BYTES_AT_FLUSH:
+        return -0x1D;
+      case ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_HITS:
+        return -0x1E;
+      case ROCKSDB_NAMESPACE::Tickers::VERIFY_CHECKSUM_READ_BYTES:
+        return -0x1F;
+      case ROCKSDB_NAMESPACE::Tickers::BACKUP_READ_BYTES:
+        return -0x20;
+      case ROCKSDB_NAMESPACE::Tickers::BACKUP_WRITE_BYTES:
+        return -0x21;
+      case ROCKSDB_NAMESPACE::Tickers::REMOTE_COMPACT_READ_BYTES:
+        return -0x22;
+      case ROCKSDB_NAMESPACE::Tickers::REMOTE_COMPACT_WRITE_BYTES:
+        return -0x23;
+      case ROCKSDB_NAMESPACE::Tickers::HOT_FILE_READ_BYTES:
+        return -0x24;
+      case ROCKSDB_NAMESPACE::Tickers::WARM_FILE_READ_BYTES:
+        return -0x25;
+      case ROCKSDB_NAMESPACE::Tickers::COLD_FILE_READ_BYTES:
+
return -0x26; + case ROCKSDB_NAMESPACE::Tickers::HOT_FILE_READ_COUNT: + return -0x27; + case ROCKSDB_NAMESPACE::Tickers::WARM_FILE_READ_COUNT: + return -0x28; + case ROCKSDB_NAMESPACE::Tickers::COLD_FILE_READ_COUNT: + return -0x29; + case ROCKSDB_NAMESPACE::Tickers::LAST_LEVEL_READ_BYTES: + return -0x2A; + case ROCKSDB_NAMESPACE::Tickers::LAST_LEVEL_READ_COUNT: + return -0x2B; + case ROCKSDB_NAMESPACE::Tickers::NON_LAST_LEVEL_READ_BYTES: + return -0x2C; + case ROCKSDB_NAMESPACE::Tickers::NON_LAST_LEVEL_READ_COUNT: + return -0x2D; + case ROCKSDB_NAMESPACE::Tickers::BLOCK_CHECKSUM_COMPUTE_COUNT: + return -0x2E; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_MISS: + return -0x2F; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_HIT: + return -0x30; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_ADD: + return -0x31; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_ADD_FAILURES: + return -0x32; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_BYTES_READ: + return -0x33; + case ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_BYTES_WRITE: + return -0x34; + case ROCKSDB_NAMESPACE::Tickers::READ_ASYNC_MICROS: + return -0x35; + case ROCKSDB_NAMESPACE::Tickers::ASYNC_READ_ERROR_COUNT: + return -0x36; + case ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX: + // 0x5F was the max value in the initial copy of tickers to Java. + // Since these values are exposed directly to Java clients, we keep + // the value the same forever. + // + // TODO: This particular case seems confusing and unnecessary to pin the + // value since it's meant to be the number of tickers, not an actual + // ticker value. But we aren't yet in a position to fix it since the + // number of tickers doesn't fit in the Java representation (jbyte). + return 0x5F; + default: + // undefined/default + return 0x0; + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::Tickers enum for the + // provided Java org.rocksdb.TickerType + static ROCKSDB_NAMESPACE::Tickers toCppTickers(jbyte jticker_type) { + switch (jticker_type) { + case 0x0: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_MISS; + case 0x1: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_HIT; + case 0x2: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD; + case 0x3: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_ADD_FAILURES; + case 0x4: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_MISS; + case 0x5: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_HIT; + case 0x6: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_ADD; + case 0x7: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_INSERT; + case 0x8: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_INDEX_BYTES_EVICT; + case 0x9: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_MISS; + case 0xA: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_HIT; + case 0xB: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_ADD; + case 0xC: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_INSERT; + case 0xD: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_FILTER_BYTES_EVICT; + case 0xE: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_MISS; + case 0xF: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_HIT; + case 0x10: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_ADD; + case 0x11: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_DATA_BYTES_INSERT; + case 0x12: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_READ; + case 0x13: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_BYTES_WRITE; + case 0x14: + return 
ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_USEFUL; + case 0x15: + return ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_HIT; + case 0x16: + return ROCKSDB_NAMESPACE::Tickers::PERSISTENT_CACHE_MISS; + case 0x17: + return ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_HIT; + case 0x18: + return ROCKSDB_NAMESPACE::Tickers::SIM_BLOCK_CACHE_MISS; + case 0x19: + return ROCKSDB_NAMESPACE::Tickers::MEMTABLE_HIT; + case 0x1A: + return ROCKSDB_NAMESPACE::Tickers::MEMTABLE_MISS; + case 0x1B: + return ROCKSDB_NAMESPACE::Tickers::GET_HIT_L0; + case 0x1C: + return ROCKSDB_NAMESPACE::Tickers::GET_HIT_L1; + case 0x1D: + return ROCKSDB_NAMESPACE::Tickers::GET_HIT_L2_AND_UP; + case 0x1E: + return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY; + case 0x1F: + return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_OBSOLETE; + case 0x20: + return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_RANGE_DEL; + case 0x21: + return ROCKSDB_NAMESPACE::Tickers::COMPACTION_KEY_DROP_USER; + case 0x22: + return ROCKSDB_NAMESPACE::Tickers::COMPACTION_RANGE_DEL_DROP_OBSOLETE; + case 0x23: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_WRITTEN; + case 0x24: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_READ; + case 0x25: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_KEYS_UPDATED; + case 0x26: + return ROCKSDB_NAMESPACE::Tickers::BYTES_WRITTEN; + case 0x27: + return ROCKSDB_NAMESPACE::Tickers::BYTES_READ; + case 0x28: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK; + case 0x29: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT; + case 0x2A: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV; + case 0x2B: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_SEEK_FOUND; + case 0x2C: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_NEXT_FOUND; + case 0x2D: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_DB_PREV_FOUND; + case 0x2E: + return ROCKSDB_NAMESPACE::Tickers::ITER_BYTES_READ; + case 0x2F: + return ROCKSDB_NAMESPACE::Tickers::NO_FILE_CLOSES; + case 0x30: + return ROCKSDB_NAMESPACE::Tickers::NO_FILE_OPENS; + case 0x31: + return ROCKSDB_NAMESPACE::Tickers::NO_FILE_ERRORS; + case 0x32: + return ROCKSDB_NAMESPACE::Tickers::STALL_L0_SLOWDOWN_MICROS; + case 0x33: + return ROCKSDB_NAMESPACE::Tickers::STALL_MEMTABLE_COMPACTION_MICROS; + case 0x34: + return ROCKSDB_NAMESPACE::Tickers::STALL_L0_NUM_FILES_MICROS; + case 0x35: + return ROCKSDB_NAMESPACE::Tickers::STALL_MICROS; + case 0x36: + return ROCKSDB_NAMESPACE::Tickers::DB_MUTEX_WAIT_MICROS; + case 0x37: + return ROCKSDB_NAMESPACE::Tickers::RATE_LIMIT_DELAY_MILLIS; + case 0x38: + return ROCKSDB_NAMESPACE::Tickers::NO_ITERATORS; + case 0x39: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_CALLS; + case 0x3A: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_READ; + case 0x3B: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_BYTES_READ; + case 0x3C: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_FILTERED_DELETES; + case 0x3D: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_MERGE_FAILURES; + case 0x3E: + return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_CHECKED; + case 0x3F: + return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_PREFIX_USEFUL; + case 0x40: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION; + case 0x41: + return ROCKSDB_NAMESPACE::Tickers::GET_UPDATES_SINCE_CALLS; + case 0x42: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_MISS; + case 0x43: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_HIT; + case 0x44: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD; + case 0x45: + return 
ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_COMPRESSED_ADD_FAILURES; + case 0x46: + return ROCKSDB_NAMESPACE::Tickers::WAL_FILE_SYNCED; + case 0x47: + return ROCKSDB_NAMESPACE::Tickers::WAL_FILE_BYTES; + case 0x48: + return ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_SELF; + case 0x49: + return ROCKSDB_NAMESPACE::Tickers::WRITE_DONE_BY_OTHER; + case 0x4A: + return ROCKSDB_NAMESPACE::Tickers::WRITE_TIMEDOUT; + case 0x4B: + return ROCKSDB_NAMESPACE::Tickers::WRITE_WITH_WAL; + case 0x4C: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES; + case 0x4D: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES; + case 0x4E: + return ROCKSDB_NAMESPACE::Tickers::FLUSH_WRITE_BYTES; + case 0x4F: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_DIRECT_LOAD_TABLE_PROPERTIES; + case 0x50: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_ACQUIRES; + case 0x51: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_RELEASES; + case 0x52: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_SUPERVERSION_CLEANUPS; + case 0x53: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_COMPRESSED; + case 0x54: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_DECOMPRESSED; + case 0x55: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_BLOCK_NOT_COMPRESSED; + case 0x56: + return ROCKSDB_NAMESPACE::Tickers::MERGE_OPERATION_TOTAL_TIME; + case 0x57: + return ROCKSDB_NAMESPACE::Tickers::FILTER_OPERATION_TOTAL_TIME; + case 0x58: + return ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_HIT; + case 0x59: + return ROCKSDB_NAMESPACE::Tickers::ROW_CACHE_MISS; + case 0x5A: + return ROCKSDB_NAMESPACE::Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES; + case 0x5B: + return ROCKSDB_NAMESPACE::Tickers::READ_AMP_TOTAL_READ_BYTES; + case 0x5C: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_RATE_LIMITER_DRAINS; + case 0x5D: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_ITER_SKIP; + case 0x5E: + return ROCKSDB_NAMESPACE::Tickers::NUMBER_MULTIGET_KEYS_FOUND; + case -0x01: + // -0x01 so we can skip over the already taken 0x5F (TICKER_ENUM_MAX). 
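+        // (Illustrative note on the id scheme, not an exhaustive list: jbyte
+        // is a signed 8-bit type, so every ticker added after the ids reached
+        // 0x7F is pinned to a negative id, e.g. FILES_MARKED_TRASH <-> -0x0E.
+        // Assuming the two switches are kept in sync, the mapping is expected
+        // to round-trip exactly for every ticker t listed here:
+        //   assert(TickerTypeJni::toCppTickers(
+        //              TickerTypeJni::toJavaTickerType(t)) == t);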
+ return ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_CREATED; + case 0x60: + return ROCKSDB_NAMESPACE::Tickers::NO_ITERATOR_DELETED; + case 0x61: + return ROCKSDB_NAMESPACE::Tickers:: + COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE; + case 0x62: + return ROCKSDB_NAMESPACE::Tickers::COMPACTION_CANCELLED; + case 0x63: + return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_POSITIVE; + case 0x64: + return ROCKSDB_NAMESPACE::Tickers::BLOOM_FILTER_FULL_TRUE_POSITIVE; + case 0x65: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PUT; + case 0x66: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_WRITE; + case 0x67: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_GET; + case 0x68: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_MULTIGET; + case 0x69: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_SEEK; + case 0x6A: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_NEXT; + case 0x6B: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_PREV; + case 0x6C: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_WRITTEN; + case 0x6D: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_NUM_KEYS_READ; + case 0x6E: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_WRITTEN; + case 0x6F: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BYTES_READ; + case 0x70: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED; + case 0x71: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_INLINED_TTL; + case 0x72: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB; + case 0x73: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_WRITE_BLOB_TTL; + case 0x74: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_WRITTEN; + case 0x75: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_BYTES_READ; + case 0x76: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_FILE_SYNCED; + case 0x77: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_COUNT; + case 0x78: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EXPIRED_SIZE; + case 0x79: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_COUNT; + case 0x7A: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_BLOB_INDEX_EVICTED_SIZE; + case 0x7B: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_FILES; + case 0x7C: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_NEW_FILES; + case 0x7D: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_FAILURES; + case 0x7E: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_OVERWRITTEN; + case 0x7F: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_EXPIRED; + case -0x02: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_NUM_KEYS_RELOCATED; + case -0x03: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_OVERWRITTEN; + case -0x04: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_EXPIRED; + case -0x05: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_GC_BYTES_RELOCATED; + case -0x06: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_FILES_EVICTED; + case -0x07: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_NUM_KEYS_EVICTED; + case -0x08: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_FIFO_BYTES_EVICTED; + case -0x09: + return ROCKSDB_NAMESPACE::Tickers::TXN_PREPARE_MUTEX_OVERHEAD; + case -0x0A: + return ROCKSDB_NAMESPACE::Tickers::TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD; + case -0x0B: + return ROCKSDB_NAMESPACE::Tickers::TXN_DUPLICATE_KEY_OVERHEAD; + case -0x0C: + return ROCKSDB_NAMESPACE::Tickers::TXN_SNAPSHOT_MUTEX_OVERHEAD; + case -0x0D: + return ROCKSDB_NAMESPACE::Tickers::TXN_GET_TRY_AGAIN; + case -0x0E: + return ROCKSDB_NAMESPACE::Tickers::FILES_MARKED_TRASH; + case -0x0F: + return 
ROCKSDB_NAMESPACE::Tickers::FILES_DELETED_IMMEDIATELY; + case -0x10: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_MARKED; + case -0x11: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_PERIODIC; + case -0x12: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_READ_BYTES_TTL; + case -0x13: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_MARKED; + case -0x14: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_PERIODIC; + case -0x15: + return ROCKSDB_NAMESPACE::Tickers::COMPACT_WRITE_BYTES_TTL; + case -0x16: + return ROCKSDB_NAMESPACE::Tickers::ERROR_HANDLER_BG_ERROR_COUNT; + case -0x17: + return ROCKSDB_NAMESPACE::Tickers::ERROR_HANDLER_BG_IO_ERROR_COUNT; + case -0x18: + return ROCKSDB_NAMESPACE::Tickers:: + ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT; + case -0x19: + return ROCKSDB_NAMESPACE::Tickers::ERROR_HANDLER_AUTORESUME_COUNT; + case -0x1A: + return ROCKSDB_NAMESPACE::Tickers:: + ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT; + case -0x1B: + return ROCKSDB_NAMESPACE::Tickers:: + ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT; + case -0x1C: + return ROCKSDB_NAMESPACE::Tickers::MEMTABLE_PAYLOAD_BYTES_AT_FLUSH; + case -0x1D: + return ROCKSDB_NAMESPACE::Tickers::MEMTABLE_GARBAGE_BYTES_AT_FLUSH; + case -0x1E: + return ROCKSDB_NAMESPACE::Tickers::SECONDARY_CACHE_HITS; + case -0x1F: + return ROCKSDB_NAMESPACE::Tickers::VERIFY_CHECKSUM_READ_BYTES; + case -0x20: + return ROCKSDB_NAMESPACE::Tickers::BACKUP_READ_BYTES; + case -0x21: + return ROCKSDB_NAMESPACE::Tickers::BACKUP_WRITE_BYTES; + case -0x22: + return ROCKSDB_NAMESPACE::Tickers::REMOTE_COMPACT_READ_BYTES; + case -0x23: + return ROCKSDB_NAMESPACE::Tickers::REMOTE_COMPACT_WRITE_BYTES; + case -0x24: + return ROCKSDB_NAMESPACE::Tickers::HOT_FILE_READ_BYTES; + case -0x25: + return ROCKSDB_NAMESPACE::Tickers::WARM_FILE_READ_BYTES; + case -0x26: + return ROCKSDB_NAMESPACE::Tickers::COLD_FILE_READ_BYTES; + case -0x27: + return ROCKSDB_NAMESPACE::Tickers::HOT_FILE_READ_COUNT; + case -0x28: + return ROCKSDB_NAMESPACE::Tickers::WARM_FILE_READ_COUNT; + case -0x29: + return ROCKSDB_NAMESPACE::Tickers::COLD_FILE_READ_COUNT; + case -0x2A: + return ROCKSDB_NAMESPACE::Tickers::LAST_LEVEL_READ_BYTES; + case -0x2B: + return ROCKSDB_NAMESPACE::Tickers::LAST_LEVEL_READ_COUNT; + case -0x2C: + return ROCKSDB_NAMESPACE::Tickers::NON_LAST_LEVEL_READ_BYTES; + case -0x2D: + return ROCKSDB_NAMESPACE::Tickers::NON_LAST_LEVEL_READ_COUNT; + case -0x2E: + return ROCKSDB_NAMESPACE::Tickers::BLOCK_CHECKSUM_COMPUTE_COUNT; + case -0x2F: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_MISS; + case -0x30: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_HIT; + case -0x31: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_ADD; + case -0x32: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_ADD_FAILURES; + case -0x33: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_BYTES_READ; + case -0x34: + return ROCKSDB_NAMESPACE::Tickers::BLOB_DB_CACHE_BYTES_WRITE; + case -0x35: + return ROCKSDB_NAMESPACE::Tickers::READ_ASYNC_MICROS; + case -0x36: + return ROCKSDB_NAMESPACE::Tickers::ASYNC_READ_ERROR_COUNT; + case 0x5F: + // 0x5F was the max value in the initial copy of tickers to Java. + // Since these values are exposed directly to Java clients, we keep + // the value the same forever. + // + // TODO: This particular case seems confusing and unnecessary to pin the + // value since it's meant to be the number of tickers, not an actual + // ticker value. 
But we aren't yet in a position to fix it since the
+        // number of tickers doesn't fit in the Java representation (jbyte).
+        return ROCKSDB_NAMESPACE::Tickers::TICKER_ENUM_MAX;
+
+      default:
+        // undefined/default
+        return ROCKSDB_NAMESPACE::Tickers::BLOCK_CACHE_MISS;
+    }
+  }
+};
+
+// The portal class for org.rocksdb.HistogramType
+class HistogramTypeJni {
+ public:
+  // Returns the equivalent org.rocksdb.HistogramType for the provided
+  // C++ ROCKSDB_NAMESPACE::Histograms enum
+  static jbyte toJavaHistogramsType(
+      const ROCKSDB_NAMESPACE::Histograms& histograms) {
+    switch (histograms) {
+      case ROCKSDB_NAMESPACE::Histograms::DB_GET:
+        return 0x0;
+      case ROCKSDB_NAMESPACE::Histograms::DB_WRITE:
+        return 0x1;
+      case ROCKSDB_NAMESPACE::Histograms::COMPACTION_TIME:
+        return 0x2;
+      case ROCKSDB_NAMESPACE::Histograms::SUBCOMPACTION_SETUP_TIME:
+        return 0x3;
+      case ROCKSDB_NAMESPACE::Histograms::TABLE_SYNC_MICROS:
+        return 0x4;
+      case ROCKSDB_NAMESPACE::Histograms::COMPACTION_OUTFILE_SYNC_MICROS:
+        return 0x5;
+      case ROCKSDB_NAMESPACE::Histograms::WAL_FILE_SYNC_MICROS:
+        return 0x6;
+      case ROCKSDB_NAMESPACE::Histograms::MANIFEST_FILE_SYNC_MICROS:
+        return 0x7;
+      case ROCKSDB_NAMESPACE::Histograms::TABLE_OPEN_IO_MICROS:
+        return 0x8;
+      case ROCKSDB_NAMESPACE::Histograms::DB_MULTIGET:
+        return 0x9;
+      case ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_COMPACTION_MICROS:
+        return 0xA;
+      case ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_GET_MICROS:
+        return 0xB;
+      case ROCKSDB_NAMESPACE::Histograms::WRITE_RAW_BLOCK_MICROS:
+        return 0xC;
+      case ROCKSDB_NAMESPACE::Histograms::STALL_L0_SLOWDOWN_COUNT:
+        return 0xD;
+      case ROCKSDB_NAMESPACE::Histograms::STALL_MEMTABLE_COMPACTION_COUNT:
+        return 0xE;
+      case ROCKSDB_NAMESPACE::Histograms::STALL_L0_NUM_FILES_COUNT:
+        return 0xF;
+      case ROCKSDB_NAMESPACE::Histograms::HARD_RATE_LIMIT_DELAY_COUNT:
+        return 0x10;
+      case ROCKSDB_NAMESPACE::Histograms::SOFT_RATE_LIMIT_DELAY_COUNT:
+        return 0x11;
+      case ROCKSDB_NAMESPACE::Histograms::NUM_FILES_IN_SINGLE_COMPACTION:
+        return 0x12;
+      case ROCKSDB_NAMESPACE::Histograms::DB_SEEK:
+        return 0x13;
+      case ROCKSDB_NAMESPACE::Histograms::WRITE_STALL:
+        return 0x14;
+      case ROCKSDB_NAMESPACE::Histograms::SST_READ_MICROS:
+        return 0x15;
+      case ROCKSDB_NAMESPACE::Histograms::NUM_SUBCOMPACTIONS_SCHEDULED:
+        return 0x16;
+      case ROCKSDB_NAMESPACE::Histograms::BYTES_PER_READ:
+        return 0x17;
+      case ROCKSDB_NAMESPACE::Histograms::BYTES_PER_WRITE:
+        return 0x18;
+      case ROCKSDB_NAMESPACE::Histograms::BYTES_PER_MULTIGET:
+        return 0x19;
+      case ROCKSDB_NAMESPACE::Histograms::BYTES_COMPRESSED:
+        return 0x1A;
+      case ROCKSDB_NAMESPACE::Histograms::BYTES_DECOMPRESSED:
+        return 0x1B;
+      case ROCKSDB_NAMESPACE::Histograms::COMPRESSION_TIMES_NANOS:
+        return 0x1C;
+      case ROCKSDB_NAMESPACE::Histograms::DECOMPRESSION_TIMES_NANOS:
+        return 0x1D;
+      case ROCKSDB_NAMESPACE::Histograms::READ_NUM_MERGE_OPERANDS:
+        return 0x1E;
+      // 0x20 to skip 0x1F so HISTOGRAM_ENUM_MAX remains unchanged for minor
+      // version compatibility.
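+      // (Illustrative example of the pinning rule above, assuming ids stay
+      // stable once published to Java clients: FLUSH_TIME, added after 0x1E
+      // was taken, is assigned 0x20 below and keeps that id in later
+      // releases, while 0x1F stays reserved for HISTOGRAM_ENUM_MAX.)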
+      case ROCKSDB_NAMESPACE::Histograms::FLUSH_TIME:
+        return 0x20;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_KEY_SIZE:
+        return 0x21;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_VALUE_SIZE:
+        return 0x22;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_WRITE_MICROS:
+        return 0x23;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GET_MICROS:
+        return 0x24;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_MULTIGET_MICROS:
+        return 0x25;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_SEEK_MICROS:
+        return 0x26;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_NEXT_MICROS:
+        return 0x27;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_PREV_MICROS:
+        return 0x28;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_WRITE_MICROS:
+        return 0x29;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_READ_MICROS:
+        return 0x2A;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_SYNC_MICROS:
+        return 0x2B;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GC_MICROS:
+        return 0x2C;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_COMPRESSION_MICROS:
+        return 0x2D;
+      case ROCKSDB_NAMESPACE::Histograms::BLOB_DB_DECOMPRESSION_MICROS:
+        return 0x2E;
+      case ROCKSDB_NAMESPACE::Histograms::
+          NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL:
+        return 0x2F;
+      case ROCKSDB_NAMESPACE::Histograms::NUM_DATA_BLOCKS_READ_PER_LEVEL:
+        return 0x30;
+      case ROCKSDB_NAMESPACE::Histograms::NUM_SST_READ_PER_LEVEL:
+        return 0x31;
+      case ROCKSDB_NAMESPACE::Histograms::ERROR_HANDLER_AUTORESUME_RETRY_COUNT:
+        return 0x32;
+      case ROCKSDB_NAMESPACE::Histograms::ASYNC_READ_BYTES:
+        return 0x33;
+      case ROCKSDB_NAMESPACE::Histograms::POLL_WAIT_MICROS:
+        return 0x34;
+      case ROCKSDB_NAMESPACE::Histograms::PREFETCHED_BYTES_DISCARDED:
+        return 0x35;
+      case ROCKSDB_NAMESPACE::Histograms::MULTIGET_IO_BATCH_SIZE:
+        return 0x36;
+      case ROCKSDB_NAMESPACE::Histograms::NUM_LEVEL_READ_PER_MULTIGET:
+        return 0x37;
+      case ROCKSDB_NAMESPACE::Histograms::ASYNC_PREFETCH_ABORT_MICROS:
+        return 0x38;
+      case ROCKSDB_NAMESPACE::Histograms::HISTOGRAM_ENUM_MAX:
+        // 0x1F for backwards compatibility on current minor version.
+        return 0x1F;
+
+      default:
+        // undefined/default
+        return 0x0;
+    }
+  }
+
+  // Returns the equivalent C++ ROCKSDB_NAMESPACE::Histograms enum for the
+  // provided Java org.rocksdb.HistogramType
+  static ROCKSDB_NAMESPACE::Histograms toCppHistograms(jbyte jhistograms_type) {
+    switch (jhistograms_type) {
+      case 0x0:
+        return ROCKSDB_NAMESPACE::Histograms::DB_GET;
+      case 0x1:
+        return ROCKSDB_NAMESPACE::Histograms::DB_WRITE;
+      case 0x2:
+        return ROCKSDB_NAMESPACE::Histograms::COMPACTION_TIME;
+      case 0x3:
+        return ROCKSDB_NAMESPACE::Histograms::SUBCOMPACTION_SETUP_TIME;
+      case 0x4:
+        return ROCKSDB_NAMESPACE::Histograms::TABLE_SYNC_MICROS;
+      case 0x5:
+        return ROCKSDB_NAMESPACE::Histograms::COMPACTION_OUTFILE_SYNC_MICROS;
+      case 0x6:
+        return ROCKSDB_NAMESPACE::Histograms::WAL_FILE_SYNC_MICROS;
+      case 0x7:
+        return ROCKSDB_NAMESPACE::Histograms::MANIFEST_FILE_SYNC_MICROS;
+      case 0x8:
+        return ROCKSDB_NAMESPACE::Histograms::TABLE_OPEN_IO_MICROS;
+      case 0x9:
+        return ROCKSDB_NAMESPACE::Histograms::DB_MULTIGET;
+      case 0xA:
+        return ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_COMPACTION_MICROS;
+      case 0xB:
+        return ROCKSDB_NAMESPACE::Histograms::READ_BLOCK_GET_MICROS;
+      case 0xC:
+        return ROCKSDB_NAMESPACE::Histograms::WRITE_RAW_BLOCK_MICROS;
+      case 0xD:
+        return ROCKSDB_NAMESPACE::Histograms::STALL_L0_SLOWDOWN_COUNT;
+      case 0xE:
+        return ROCKSDB_NAMESPACE::Histograms::STALL_MEMTABLE_COMPACTION_COUNT;
+      case 0xF:
+        return ROCKSDB_NAMESPACE::Histograms::STALL_L0_NUM_FILES_COUNT;
+      case 0x10:
+        return ROCKSDB_NAMESPACE::Histograms::HARD_RATE_LIMIT_DELAY_COUNT;
+      case 0x11:
+        return ROCKSDB_NAMESPACE::Histograms::SOFT_RATE_LIMIT_DELAY_COUNT;
+      case 0x12:
+        return ROCKSDB_NAMESPACE::Histograms::NUM_FILES_IN_SINGLE_COMPACTION;
+      case 0x13:
+        return ROCKSDB_NAMESPACE::Histograms::DB_SEEK;
+      case 0x14:
+        return ROCKSDB_NAMESPACE::Histograms::WRITE_STALL;
+      case 0x15:
+        return ROCKSDB_NAMESPACE::Histograms::SST_READ_MICROS;
+      case 0x16:
+        return ROCKSDB_NAMESPACE::Histograms::NUM_SUBCOMPACTIONS_SCHEDULED;
+      case 0x17:
+        return ROCKSDB_NAMESPACE::Histograms::BYTES_PER_READ;
+      case 0x18:
+        return ROCKSDB_NAMESPACE::Histograms::BYTES_PER_WRITE;
+      case 0x19:
+        return ROCKSDB_NAMESPACE::Histograms::BYTES_PER_MULTIGET;
+      case 0x1A:
+        return ROCKSDB_NAMESPACE::Histograms::BYTES_COMPRESSED;
+      case 0x1B:
+        return ROCKSDB_NAMESPACE::Histograms::BYTES_DECOMPRESSED;
+      case 0x1C:
+        return ROCKSDB_NAMESPACE::Histograms::COMPRESSION_TIMES_NANOS;
+      case 0x1D:
+        return ROCKSDB_NAMESPACE::Histograms::DECOMPRESSION_TIMES_NANOS;
+      case 0x1E:
+        return ROCKSDB_NAMESPACE::Histograms::READ_NUM_MERGE_OPERANDS;
+      // 0x20 to skip 0x1F so HISTOGRAM_ENUM_MAX remains unchanged for minor
+      // version compatibility.
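+      // (A minimal usage sketch, assuming the jbyte comes from the Java side,
+      // e.g. from org.rocksdb.HistogramType#getValue():
+      //   ROCKSDB_NAMESPACE::Histograms h =
+      //       HistogramTypeJni::toCppHistograms(jhistograms_type);
+      // Unrecognised bytes deliberately fall back to DB_GET in the default
+      // branch at the end of this switch.)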
+ case 0x20: + return ROCKSDB_NAMESPACE::Histograms::FLUSH_TIME; + case 0x21: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_KEY_SIZE; + case 0x22: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_VALUE_SIZE; + case 0x23: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_WRITE_MICROS; + case 0x24: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GET_MICROS; + case 0x25: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_MULTIGET_MICROS; + case 0x26: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_SEEK_MICROS; + case 0x27: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_NEXT_MICROS; + case 0x28: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_PREV_MICROS; + case 0x29: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_WRITE_MICROS; + case 0x2A: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_READ_MICROS; + case 0x2B: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_BLOB_FILE_SYNC_MICROS; + case 0x2C: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_GC_MICROS; + case 0x2D: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_COMPRESSION_MICROS; + case 0x2E: + return ROCKSDB_NAMESPACE::Histograms::BLOB_DB_DECOMPRESSION_MICROS; + case 0x2F: + return ROCKSDB_NAMESPACE::Histograms:: + NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL; + case 0x30: + return ROCKSDB_NAMESPACE::Histograms::NUM_DATA_BLOCKS_READ_PER_LEVEL; + case 0x31: + return ROCKSDB_NAMESPACE::Histograms::NUM_SST_READ_PER_LEVEL; + case 0x32: + return ROCKSDB_NAMESPACE::Histograms:: + ERROR_HANDLER_AUTORESUME_RETRY_COUNT; + case 0x33: + return ROCKSDB_NAMESPACE::Histograms::ASYNC_READ_BYTES; + case 0x34: + return ROCKSDB_NAMESPACE::Histograms::POLL_WAIT_MICROS; + case 0x35: + return ROCKSDB_NAMESPACE::Histograms::PREFETCHED_BYTES_DISCARDED; + case 0x36: + return ROCKSDB_NAMESPACE::Histograms::MULTIGET_IO_BATCH_SIZE; + case 0x37: + return ROCKSDB_NAMESPACE::Histograms::NUM_LEVEL_READ_PER_MULTIGET; + case 0x38: + return ROCKSDB_NAMESPACE::Histograms::ASYNC_PREFETCH_ABORT_MICROS; + case 0x1F: + // 0x1F for backwards compatibility on current minor version. 
+ return ROCKSDB_NAMESPACE::Histograms::HISTOGRAM_ENUM_MAX; + + default: + // undefined/default + return ROCKSDB_NAMESPACE::Histograms::DB_GET; + } + } +}; + +// The portal class for org.rocksdb.StatsLevel +class StatsLevelJni { + public: + // Returns the equivalent org.rocksdb.StatsLevel for the provided + // C++ ROCKSDB_NAMESPACE::StatsLevel enum + static jbyte toJavaStatsLevel( + const ROCKSDB_NAMESPACE::StatsLevel& stats_level) { + switch (stats_level) { + case ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers: + return 0x0; + case ROCKSDB_NAMESPACE::StatsLevel::kExceptTimeForMutex: + return 0x1; + case ROCKSDB_NAMESPACE::StatsLevel::kAll: + return 0x2; + + default: + // undefined/default + return 0x0; + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::StatsLevel enum for the + // provided Java org.rocksdb.StatsLevel + static ROCKSDB_NAMESPACE::StatsLevel toCppStatsLevel(jbyte jstats_level) { + switch (jstats_level) { + case 0x0: + return ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers; + case 0x1: + return ROCKSDB_NAMESPACE::StatsLevel::kExceptTimeForMutex; + case 0x2: + return ROCKSDB_NAMESPACE::StatsLevel::kAll; + + default: + // undefined/default + return ROCKSDB_NAMESPACE::StatsLevel::kExceptDetailedTimers; + } + } +}; + +// The portal class for org.rocksdb.RateLimiterMode +class RateLimiterModeJni { + public: + // Returns the equivalent org.rocksdb.RateLimiterMode for the provided + // C++ ROCKSDB_NAMESPACE::RateLimiter::Mode enum + static jbyte toJavaRateLimiterMode( + const ROCKSDB_NAMESPACE::RateLimiter::Mode& rate_limiter_mode) { + switch (rate_limiter_mode) { + case ROCKSDB_NAMESPACE::RateLimiter::Mode::kReadsOnly: + return 0x0; + case ROCKSDB_NAMESPACE::RateLimiter::Mode::kWritesOnly: + return 0x1; + case ROCKSDB_NAMESPACE::RateLimiter::Mode::kAllIo: + return 0x2; + + default: + // undefined/default + return 0x1; + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::RateLimiter::Mode enum for + // the provided Java org.rocksdb.RateLimiterMode + static ROCKSDB_NAMESPACE::RateLimiter::Mode toCppRateLimiterMode( + jbyte jrate_limiter_mode) { + switch (jrate_limiter_mode) { + case 0x0: + return ROCKSDB_NAMESPACE::RateLimiter::Mode::kReadsOnly; + case 0x1: + return ROCKSDB_NAMESPACE::RateLimiter::Mode::kWritesOnly; + case 0x2: + return ROCKSDB_NAMESPACE::RateLimiter::Mode::kAllIo; + + default: + // undefined/default + return ROCKSDB_NAMESPACE::RateLimiter::Mode::kWritesOnly; + } + } +}; + +// The portal class for org.rocksdb.MemoryUsageType +class MemoryUsageTypeJni { + public: + // Returns the equivalent org.rocksdb.MemoryUsageType for the provided + // C++ ROCKSDB_NAMESPACE::MemoryUtil::UsageType enum + static jbyte toJavaMemoryUsageType( + const ROCKSDB_NAMESPACE::MemoryUtil::UsageType& usage_type) { + switch (usage_type) { + case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableTotal: + return 0x0; + case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableUnFlushed: + return 0x1; + case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kTableReadersTotal: + return 0x2; + case ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kCacheTotal: + return 0x3; + default: + // undefined: use kNumUsageTypes + return 0x4; + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::MemoryUtil::UsageType enum + // for the provided Java org.rocksdb.MemoryUsageType + static ROCKSDB_NAMESPACE::MemoryUtil::UsageType toCppMemoryUsageType( + jbyte usage_type) { + switch (usage_type) { + case 0x0: + return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableTotal; + case 0x1: + return 
ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kMemTableUnFlushed; + case 0x2: + return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kTableReadersTotal; + case 0x3: + return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kCacheTotal; + default: + // undefined/default: use kNumUsageTypes + return ROCKSDB_NAMESPACE::MemoryUtil::UsageType::kNumUsageTypes; + } + } +}; + +// The portal class for org.rocksdb.Transaction +class TransactionJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.Transaction + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/Transaction"); + } + + /** + * Create a new Java org.rocksdb.Transaction.WaitingTransactions object + * + * @param env A pointer to the Java environment + * @param jtransaction A Java org.rocksdb.Transaction object + * @param column_family_id The id of the column family + * @param key The key + * @param transaction_ids The transaction ids + * + * @return A reference to a Java + * org.rocksdb.Transaction.WaitingTransactions object, + * or nullptr if an an exception occurs + */ + static jobject newWaitingTransactions( + JNIEnv* env, jobject jtransaction, const uint32_t column_family_id, + const std::string& key, + const std::vector& transaction_ids) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "newWaitingTransactions", + "(JLjava/lang/String;[J)Lorg/rocksdb/Transaction$WaitingTransactions;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jkey = env->NewStringUTF(key.c_str()); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + const size_t len = transaction_ids.size(); + jlongArray jtransaction_ids = env->NewLongArray(static_cast(len)); + if (jtransaction_ids == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jkey); + return nullptr; + } + + jboolean is_copy; + jlong* body = env->GetLongArrayElements(jtransaction_ids, &is_copy); + if (body == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jkey); + env->DeleteLocalRef(jtransaction_ids); + return nullptr; + } + for (size_t i = 0; i < len; ++i) { + body[i] = static_cast(transaction_ids[i]); + } + env->ReleaseLongArrayElements(jtransaction_ids, body, + is_copy == JNI_TRUE ? 
0 : JNI_ABORT); + + jobject jwaiting_transactions = env->CallObjectMethod( + jtransaction, mid, static_cast(column_family_id), jkey, + jtransaction_ids); + if (env->ExceptionCheck()) { + // exception thrown: InstantiationException or OutOfMemoryError + env->DeleteLocalRef(jkey); + env->DeleteLocalRef(jtransaction_ids); + return nullptr; + } + + return jwaiting_transactions; + } +}; + +// The portal class for org.rocksdb.TransactionDB +class TransactionDBJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TransactionDB"); + } + + /** + * Create a new Java org.rocksdb.TransactionDB.DeadlockInfo object + * + * @param env A pointer to the Java environment + * @param jtransaction A Java org.rocksdb.Transaction object + * @param column_family_id The id of the column family + * @param key The key + * @param transaction_ids The transaction ids + * + * @return A reference to a Java + * org.rocksdb.Transaction.WaitingTransactions object, + * or nullptr if an an exception occurs + */ + static jobject newDeadlockInfo( + JNIEnv* env, jobject jtransaction_db, + const ROCKSDB_NAMESPACE::TransactionID transaction_id, + const uint32_t column_family_id, const std::string& waiting_key, + const bool exclusive) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "newDeadlockInfo", + "(JJLjava/lang/String;Z)Lorg/rocksdb/TransactionDB$DeadlockInfo;"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jwaiting_key = env->NewStringUTF(waiting_key.c_str()); + if (jwaiting_key == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + // resolve the column family id to a ColumnFamilyHandle + jobject jdeadlock_info = env->CallObjectMethod( + jtransaction_db, mid, transaction_id, + static_cast(column_family_id), jwaiting_key, exclusive); + if (env->ExceptionCheck()) { + // exception thrown: InstantiationException or OutOfMemoryError + env->DeleteLocalRef(jwaiting_key); + return nullptr; + } + + return jdeadlock_info; + } +}; + +// The portal class for org.rocksdb.TxnDBWritePolicy +class TxnDBWritePolicyJni { + public: + // Returns the equivalent org.rocksdb.TxnDBWritePolicy for the provided + // C++ ROCKSDB_NAMESPACE::TxnDBWritePolicy enum + static jbyte toJavaTxnDBWritePolicy( + const ROCKSDB_NAMESPACE::TxnDBWritePolicy& txndb_write_policy) { + switch (txndb_write_policy) { + case ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_COMMITTED: + return 0x0; + case ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_PREPARED: + return 0x1; + case ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_UNPREPARED: + return 0x2; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::TxnDBWritePolicy enum for the + // provided Java org.rocksdb.TxnDBWritePolicy + static ROCKSDB_NAMESPACE::TxnDBWritePolicy toCppTxnDBWritePolicy( + jbyte jtxndb_write_policy) { + switch (jtxndb_write_policy) { + case 0x0: + return ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_COMMITTED; + case 0x1: + return 
ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_PREPARED; + case 0x2: + return ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_UNPREPARED; + default: + // undefined/default + return ROCKSDB_NAMESPACE::TxnDBWritePolicy::WRITE_COMMITTED; + } + } +}; + +// The portal class for org.rocksdb.TransactionDB.KeyLockInfo +class KeyLockInfoJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.KeyLockInfo + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TransactionDB$KeyLockInfo"); + } + + /** + * Create a new Java org.rocksdb.TransactionDB.KeyLockInfo object + * with the same properties as the provided C++ ROCKSDB_NAMESPACE::KeyLockInfo + * object + * + * @param env A pointer to the Java environment + * @param key_lock_info The ROCKSDB_NAMESPACE::KeyLockInfo object + * + * @return A reference to a Java + * org.rocksdb.TransactionDB.KeyLockInfo object, + * or nullptr if an an exception occurs + */ + static jobject construct( + JNIEnv* env, const ROCKSDB_NAMESPACE::KeyLockInfo& key_lock_info) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = + env->GetMethodID(jclazz, "", "(Ljava/lang/String;[JZ)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jkey = env->NewStringUTF(key_lock_info.key.c_str()); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + const jsize jtransaction_ids_len = + static_cast(key_lock_info.ids.size()); + jlongArray jtransactions_ids = env->NewLongArray(jtransaction_ids_len); + if (jtransactions_ids == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jkey); + return nullptr; + } + + const jobject jkey_lock_info = env->NewObject( + jclazz, mid, jkey, jtransactions_ids, key_lock_info.exclusive); + if (jkey_lock_info == nullptr) { + // exception thrown: InstantiationException or OutOfMemoryError + env->DeleteLocalRef(jtransactions_ids); + env->DeleteLocalRef(jkey); + return nullptr; + } + + return jkey_lock_info; + } +}; + +// The portal class for org.rocksdb.TransactionDB.DeadlockInfo +class DeadlockInfoJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.DeadlockInfo + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TransactionDB$DeadlockInfo"); + } +}; + +// The portal class for org.rocksdb.TransactionDB.DeadlockPath +class DeadlockPathJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.DeadlockPath + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TransactionDB$DeadlockPath"); + } + + /** 
+ +// The portal class for org.rocksdb.TransactionDB.DeadlockInfo +class DeadlockInfoJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.DeadlockInfo + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TransactionDB$DeadlockInfo"); + } +}; + +// The portal class for org.rocksdb.TransactionDB.DeadlockPath +class DeadlockPathJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.TransactionDB.DeadlockPath + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TransactionDB$DeadlockPath"); + } + + /** + * Create a new Java org.rocksdb.TransactionDB.DeadlockPath object + * + * @param env A pointer to the Java environment + * + * @return A reference to a Java + * org.rocksdb.TransactionDB.DeadlockPath object, + * or nullptr if an exception occurs + */ + static jobject construct(JNIEnv* env, const jobjectArray jdeadlock_infos, + const bool limit_exceeded) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "<init>", "([Lorg/rocksdb/TransactionDB$DeadlockInfo;Z)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + const jobject jdeadlock_path = + env->NewObject(jclazz, mid, jdeadlock_infos, limit_exceeded); + if (jdeadlock_path == nullptr) { + // exception thrown: InstantiationException or OutOfMemoryError + return nullptr; + } + + return jdeadlock_path; + } +}; + +class AbstractTableFilterJni + : public RocksDBNativeClass< + const ROCKSDB_NAMESPACE::TableFilterJniCallback*, + AbstractTableFilterJni> { + public: + /** + * Get the Java Method: TableFilter#filter(TableProperties) + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getFilterMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "filter", "(Lorg/rocksdb/TableProperties;)Z"); + assert(mid != nullptr); + return mid; + } + + private: + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TableFilter"); + } +};
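A brief, hypothetical sketch of how the method id cached above is typically consumed; the real callback lives in rocksjni/table_filter_jnicallback.cc, the variable names here are assumptions, and TablePropertiesJni is the portal class defined just below:

// Hypothetical sketch only; not the library's actual callback code.
bool example_invoke_filter(JNIEnv* env, jobject jtable_filter,
                           const ROCKSDB_NAMESPACE::TableProperties& props) {
  jmethodID mid =
      ROCKSDB_NAMESPACE::AbstractTableFilterJni::getFilterMethod(env);
  if (mid == nullptr) {
    return true;  // method lookup failed; default to scanning the table
  }
  jobject jprops =
      ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties(env, props);
  if (jprops == nullptr) {
    return true;  // Java exception pending
  }
  const jboolean jresult = env->CallBooleanMethod(jtable_filter, mid, jprops);
  env->DeleteLocalRef(jprops);
  return jresult == JNI_TRUE;
}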
+ +class TablePropertiesJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.TableProperties object. + * + * @param env A pointer to the Java environment + * @param table_properties A C++ table properties object + * + * @return A reference to a Java org.rocksdb.TableProperties object, or + * nullptr if an exception occurs + */ + static jobject fromCppTableProperties( + JNIEnv* env, const ROCKSDB_NAMESPACE::TableProperties& table_properties) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "<init>", + "(JJJJJJJJJJJJJJJJJJJJJJ[BLjava/lang/String;Ljava/lang/String;Ljava/" + "lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/" + "String;Ljava/util/Map;Ljava/util/Map;)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jbyteArray jcolumn_family_name = ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, table_properties.column_family_name); + if (jcolumn_family_name == nullptr) { + // exception occurred creating java byte array + return nullptr; + } + + jstring jfilter_policy_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &table_properties.filter_policy_name, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + return nullptr; + } + + jstring jcomparator_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &table_properties.comparator_name, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfilter_policy_name); + return nullptr; + } + + jstring jmerge_operator_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &table_properties.merge_operator_name, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfilter_policy_name); + env->DeleteLocalRef(jcomparator_name); + return nullptr; + } + + jstring jprefix_extractor_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &table_properties.prefix_extractor_name, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfilter_policy_name); + env->DeleteLocalRef(jcomparator_name); + env->DeleteLocalRef(jmerge_operator_name); + return nullptr; + } + + jstring jproperty_collectors_names = + ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &table_properties.property_collectors_names, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfilter_policy_name); + env->DeleteLocalRef(jcomparator_name); + env->DeleteLocalRef(jmerge_operator_name); + env->DeleteLocalRef(jprefix_extractor_name); + return nullptr; + } + + jstring jcompression_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &table_properties.compression_name, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfilter_policy_name); + env->DeleteLocalRef(jcomparator_name); + env->DeleteLocalRef(jmerge_operator_name); + env->DeleteLocalRef(jprefix_extractor_name); + env->DeleteLocalRef(jproperty_collectors_names); + return nullptr; + } + + // Map<String, String> + jobject juser_collected_properties = + ROCKSDB_NAMESPACE::HashMapJni::fromCppMap( + env, &table_properties.user_collected_properties); + if (env->ExceptionCheck()) { + // exception occurred creating java map + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfilter_policy_name); + env->DeleteLocalRef(jcomparator_name); + env->DeleteLocalRef(jmerge_operator_name); + env->DeleteLocalRef(jprefix_extractor_name); + env->DeleteLocalRef(jproperty_collectors_names); + env->DeleteLocalRef(jcompression_name); + return nullptr; + }
+ + // Map<String, String> + jobject jreadable_properties = ROCKSDB_NAMESPACE::HashMapJni::fromCppMap( + env, &table_properties.readable_properties); + if (env->ExceptionCheck()) { + // exception occurred creating java map + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfilter_policy_name); + env->DeleteLocalRef(jcomparator_name); + env->DeleteLocalRef(jmerge_operator_name); + env->DeleteLocalRef(jprefix_extractor_name); + env->DeleteLocalRef(jproperty_collectors_names); + env->DeleteLocalRef(jcompression_name); + env->DeleteLocalRef(juser_collected_properties); + return nullptr; + } + + jobject jtable_properties = env->NewObject( + jclazz, mid, static_cast<jlong>(table_properties.data_size), + static_cast<jlong>(table_properties.index_size), + static_cast<jlong>(table_properties.index_partitions), + static_cast<jlong>(table_properties.top_level_index_size), + static_cast<jlong>(table_properties.index_key_is_user_key), + static_cast<jlong>(table_properties.index_value_is_delta_encoded), + static_cast<jlong>(table_properties.filter_size), + static_cast<jlong>(table_properties.raw_key_size), + static_cast<jlong>(table_properties.raw_value_size), + static_cast<jlong>(table_properties.num_data_blocks), + static_cast<jlong>(table_properties.num_entries), + static_cast<jlong>(table_properties.num_deletions), + static_cast<jlong>(table_properties.num_merge_operands), + static_cast<jlong>(table_properties.num_range_deletions), + static_cast<jlong>(table_properties.format_version), + static_cast<jlong>(table_properties.fixed_key_len), + static_cast<jlong>(table_properties.column_family_id), + static_cast<jlong>(table_properties.creation_time), + static_cast<jlong>(table_properties.oldest_key_time), + static_cast<jlong>( + table_properties.slow_compression_estimated_data_size), + static_cast<jlong>( + table_properties.fast_compression_estimated_data_size), + static_cast<jlong>( + table_properties.external_sst_file_global_seqno_offset), + jcolumn_family_name, jfilter_policy_name, jcomparator_name, + jmerge_operator_name, jprefix_extractor_name, + jproperty_collectors_names, jcompression_name, + juser_collected_properties, jreadable_properties); + + if (env->ExceptionCheck()) { + return nullptr; + } + + return jtable_properties; + } + + private: + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TableProperties"); + } +};
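The cleanup ladders above follow the header's convention: every JNI allocation is checked and, on failure, all previously created local references are released before returning nullptr. Purely as an illustration (this codebase does not use it), an RAII guard expressing the same idea would look like:

// Hypothetical alternative to the explicit cleanup ladders; not part of
// portal.h. Each guard releases one local reference when it goes out of scope.
class LocalRefGuard {
 public:
  LocalRefGuard(JNIEnv* env, jobject ref) : env_(env), ref_(ref) {}
  ~LocalRefGuard() {
    if (ref_ != nullptr) {
      env_->DeleteLocalRef(ref_);
    }
  }
  // Non-copyable: the guard owns the local reference it releases.
  LocalRefGuard(const LocalRefGuard&) = delete;
  LocalRefGuard& operator=(const LocalRefGuard&) = delete;

 private:
  JNIEnv* env_;
  jobject ref_;
};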
+ +class ColumnFamilyDescriptorJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.ColumnFamilyDescriptor + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyDescriptor"); + } + + /** + * Create a new Java org.rocksdb.ColumnFamilyDescriptor object with the same + * properties as the provided C++ ROCKSDB_NAMESPACE::ColumnFamilyDescriptor + * object + * + * @param env A pointer to the Java environment + * @param cfd A pointer to ROCKSDB_NAMESPACE::ColumnFamilyDescriptor object + * + * @return A reference to a Java org.rocksdb.ColumnFamilyDescriptor object, or + * nullptr if an exception occurs + */ + static jobject construct(JNIEnv* env, ColumnFamilyDescriptor* cfd) { + jbyteArray jcf_name = JniUtil::copyBytes(env, cfd->name); + jobject cfopts = ColumnFamilyOptionsJni::construct(env, &(cfd->options)); + + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID(jclazz, "<init>", + "([BLorg/rocksdb/ColumnFamilyOptions;)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + env->DeleteLocalRef(jcf_name); + return nullptr; + } + + jobject jcfd = env->NewObject(jclazz, mid, jcf_name, cfopts); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jcf_name); + return nullptr; + } + + return jcfd; + } + + /** + * Get the Java Method: ColumnFamilyDescriptor#columnFamilyName + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getColumnFamilyNameMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "columnFamilyName", "()[B"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: ColumnFamilyDescriptor#columnFamilyOptions + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID( + jclazz, "columnFamilyOptions", "()Lorg/rocksdb/ColumnFamilyOptions;"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.IndexType +class IndexTypeJni { + public: + // Returns the equivalent org.rocksdb.IndexType for the provided + // C++ ROCKSDB_NAMESPACE::IndexType enum + static jbyte toJavaIndexType( + const ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType& index_type) { + switch (index_type) { + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::kBinarySearch: + return 0x0; + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType::kHashSearch: + return 0x1; + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType:: + kTwoLevelIndexSearch: + return 0x2; + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType:: + kBinarySearchWithFirstKey: + return 0x3; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::IndexType enum for the + // provided Java org.rocksdb.IndexType + static ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType toCppIndexType( + jbyte jindex_type) { + switch (jindex_type) { + case 0x0: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType:: + kBinarySearch; + case 0x1: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType:: + kHashSearch; + case 0x2: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType:: + kTwoLevelIndexSearch; + case 0x3: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType:: + kBinarySearchWithFirstKey; + default: + // undefined/default + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexType:: + kBinarySearch; + } + } +}; + +// The portal class for org.rocksdb.DataBlockIndexType +class DataBlockIndexTypeJni { + public: + // Returns the equivalent org.rocksdb.DataBlockIndexType for the provided + // C++
ROCKSDB_NAMESPACE::DataBlockIndexType enum + static jbyte toJavaDataBlockIndexType( + const ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType& + index_type) { + switch (index_type) { + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType:: + kDataBlockBinarySearch: + return 0x0; + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType:: + kDataBlockBinaryAndHash: + return 0x1; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::DataBlockIndexType enum for + // the provided Java org.rocksdb.DataBlockIndexType + static ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType + toCppDataBlockIndexType(jbyte jindex_type) { + switch (jindex_type) { + case 0x0: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType:: + kDataBlockBinarySearch; + case 0x1: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType:: + kDataBlockBinaryAndHash; + default: + // undefined/default + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::DataBlockIndexType:: + kDataBlockBinarySearch; + } + } +}; + +// The portal class for org.rocksdb.ChecksumType +class ChecksumTypeJni { + public: + // Returns the equivalent org.rocksdb.ChecksumType for the provided + // C++ ROCKSDB_NAMESPACE::ChecksumType enum + static jbyte toJavaChecksumType( + const ROCKSDB_NAMESPACE::ChecksumType& checksum_type) { + switch (checksum_type) { + case ROCKSDB_NAMESPACE::ChecksumType::kNoChecksum: + return 0x0; + case ROCKSDB_NAMESPACE::ChecksumType::kCRC32c: + return 0x1; + case ROCKSDB_NAMESPACE::ChecksumType::kxxHash: + return 0x2; + case ROCKSDB_NAMESPACE::ChecksumType::kxxHash64: + return 0x3; + case ROCKSDB_NAMESPACE::ChecksumType::kXXH3: + return 0x4; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::ChecksumType enum for the + // provided Java org.rocksdb.ChecksumType + static ROCKSDB_NAMESPACE::ChecksumType toCppChecksumType( + jbyte jchecksum_type) { + switch (jchecksum_type) { + case 0x0: + return ROCKSDB_NAMESPACE::ChecksumType::kNoChecksum; + case 0x1: + return ROCKSDB_NAMESPACE::ChecksumType::kCRC32c; + case 0x2: + return ROCKSDB_NAMESPACE::ChecksumType::kxxHash; + case 0x3: + return ROCKSDB_NAMESPACE::ChecksumType::kxxHash64; + case 0x4: + return ROCKSDB_NAMESPACE::ChecksumType::kXXH3; + default: + // undefined/default + return ROCKSDB_NAMESPACE::ChecksumType::kCRC32c; + } + } +}; + +// The portal class for org.rocksdb.IndexShorteningMode +class IndexShorteningModeJni { + public: + // Returns the equivalent org.rocksdb.IndexShorteningMode for the provided + // C++ ROCKSDB_NAMESPACE::IndexShorteningMode enum + static jbyte toJavaIndexShorteningMode( + const ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode& + index_shortening_mode) { + switch (index_shortening_mode) { + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode:: + kNoShortening: + return 0x0; + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode:: + kShortenSeparators: + return 0x1; + case ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode:: + kShortenSeparatorsAndSuccessor: + return 0x2; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::IndexShorteningMode enum for + // the provided Java org.rocksdb.IndexShorteningMode + static ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode + toCppIndexShorteningMode(jbyte jindex_shortening_mode) { + switch (jindex_shortening_mode) { + case 0x0: 
+ return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode:: + kNoShortening; + case 0x1: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode:: + kShortenSeparators; + case 0x2: + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode:: + kShortenSeparatorsAndSuccessor; + default: + // undefined/default + return ROCKSDB_NAMESPACE::BlockBasedTableOptions::IndexShorteningMode:: + kShortenSeparators; + } + } +}; + +// The portal class for org.rocksdb.Priority +class PriorityJni { + public: + // Returns the equivalent org.rocksdb.Priority for the provided + // C++ ROCKSDB_NAMESPACE::Env::Priority enum + static jbyte toJavaPriority( + const ROCKSDB_NAMESPACE::Env::Priority& priority) { + switch (priority) { + case ROCKSDB_NAMESPACE::Env::Priority::BOTTOM: + return 0x0; + case ROCKSDB_NAMESPACE::Env::Priority::LOW: + return 0x1; + case ROCKSDB_NAMESPACE::Env::Priority::HIGH: + return 0x2; + case ROCKSDB_NAMESPACE::Env::Priority::TOTAL: + return 0x3; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::Env::Priority enum for the + // provided Java org.rocksdb.Priority + static ROCKSDB_NAMESPACE::Env::Priority toCppPriority(jbyte jpriority) { + switch (jpriority) { + case 0x0: + return ROCKSDB_NAMESPACE::Env::Priority::BOTTOM; + case 0x1: + return ROCKSDB_NAMESPACE::Env::Priority::LOW; + case 0x2: + return ROCKSDB_NAMESPACE::Env::Priority::HIGH; + case 0x3: + return ROCKSDB_NAMESPACE::Env::Priority::TOTAL; + default: + // undefined/default + return ROCKSDB_NAMESPACE::Env::Priority::LOW; + } + } +}; + +// The portal class for org.rocksdb.ThreadType +class ThreadTypeJni { + public: + // Returns the equivalent org.rocksdb.ThreadType for the provided + // C++ ROCKSDB_NAMESPACE::ThreadStatus::ThreadType enum + static jbyte toJavaThreadType( + const ROCKSDB_NAMESPACE::ThreadStatus::ThreadType& thread_type) { + switch (thread_type) { + case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::HIGH_PRIORITY: + return 0x0; + case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::LOW_PRIORITY: + return 0x1; + case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::USER: + return 0x2; + case ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::BOTTOM_PRIORITY: + return 0x3; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::ThreadType enum + // for the provided Java org.rocksdb.ThreadType + static ROCKSDB_NAMESPACE::ThreadStatus::ThreadType toCppThreadType( + jbyte jthread_type) { + switch (jthread_type) { + case 0x0: + return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::HIGH_PRIORITY; + case 0x1: + return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::LOW_PRIORITY; + case 0x2: + return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::USER; + case 0x3: + return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::BOTTOM_PRIORITY; + default: + // undefined/default + return ROCKSDB_NAMESPACE::ThreadStatus::ThreadType::LOW_PRIORITY; + } + } +}; + +// The portal class for org.rocksdb.OperationType +class OperationTypeJni { + public: + // Returns the equivalent org.rocksdb.OperationType for the provided + // C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationType enum + static jbyte toJavaOperationType( + const ROCKSDB_NAMESPACE::ThreadStatus::OperationType& operation_type) { + switch (operation_type) { + case ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_UNKNOWN: + return 0x0; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_COMPACTION: + return 0x1; + case
ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_FLUSH: + return 0x2; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationType + // enum for the provided Java org.rocksdb.OperationType + static ROCKSDB_NAMESPACE::ThreadStatus::OperationType toCppOperationType( + jbyte joperation_type) { + switch (joperation_type) { + case 0x0: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_UNKNOWN; + case 0x1: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_COMPACTION; + case 0x2: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_FLUSH; + default: + // undefined/default + return ROCKSDB_NAMESPACE::ThreadStatus::OperationType::OP_UNKNOWN; + } + } +}; + +// The portal class for org.rocksdb.OperationStage +class OperationStageJni { + public: + // Returns the equivalent org.rocksdb.OperationStage for the provided + // C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationStage enum + static jbyte toJavaOperationStage( + const ROCKSDB_NAMESPACE::ThreadStatus::OperationStage& operation_stage) { + switch (operation_stage) { + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_UNKNOWN: + return 0x0; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_FLUSH_RUN: + return 0x1; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_FLUSH_WRITE_L0: + return 0x2; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_PREPARE: + return 0x3; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_RUN: + return 0x4; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_PROCESS_KV: + return 0x5; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_INSTALL: + return 0x6; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_SYNC_FILE: + return 0x7; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_PICK_MEMTABLES_TO_FLUSH: + return 0x8; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_MEMTABLE_ROLLBACK: + return 0x9; + case ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS: + return 0xA; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::OperationStage + // enum for the provided Java org.rocksdb.OperationStage + static ROCKSDB_NAMESPACE::ThreadStatus::OperationStage toCppOperationStage( + jbyte joperation_stage) { + switch (joperation_stage) { + case 0x0: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_UNKNOWN; + case 0x1: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_FLUSH_RUN; + case 0x2: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_FLUSH_WRITE_L0; + case 0x3: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_PREPARE; + case 0x4: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_RUN; + case 0x5: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_PROCESS_KV; + case 0x6: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_INSTALL; + case 0x7: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_COMPACTION_SYNC_FILE; + case 0x8: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_PICK_MEMTABLES_TO_FLUSH; + case 0x9: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + STAGE_MEMTABLE_ROLLBACK; + case 0xA: + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage:: + 
STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS; + default: + // undefined/default + return ROCKSDB_NAMESPACE::ThreadStatus::OperationStage::STAGE_UNKNOWN; + } + } +}; + +// The portal class for org.rocksdb.StateType +class StateTypeJni { + public: + // Returns the equivalent org.rocksdb.StateType for the provided + // C++ ROCKSDB_NAMESPACE::ThreadStatus::StateType enum + static jbyte toJavaStateType( + const ROCKSDB_NAMESPACE::ThreadStatus::StateType& state_type) { + switch (state_type) { + case ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_UNKNOWN: + return 0x0; + case ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_MUTEX_WAIT: + return 0x1; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::ThreadStatus::StateType enum + // for the provided Java org.rocksdb.StateType + static ROCKSDB_NAMESPACE::ThreadStatus::StateType toCppStateType( + jbyte jstate_type) { + switch (jstate_type) { + case 0x0: + return ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_UNKNOWN; + case 0x1: + return ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_MUTEX_WAIT; + default: + // undefined/default + return ROCKSDB_NAMESPACE::ThreadStatus::StateType::STATE_UNKNOWN; + } + } +}; + +// The portal class for org.rocksdb.ThreadStatus +class ThreadStatusJni : public JavaClass { + public: + /** + * Get the Java Class org.rocksdb.ThreadStatus + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/ThreadStatus"); + } + + /** + * Create a new Java org.rocksdb.ThreadStatus object with the same + * properties as the provided C++ ROCKSDB_NAMESPACE::ThreadStatus object + * + * @param env A pointer to the Java environment + * @param thread_status A pointer to ROCKSDB_NAMESPACE::ThreadStatus object + * + * @return A reference to a Java org.rocksdb.ThreadStatus object, or + * nullptr if an exception occurs + */ + static jobject construct( + JNIEnv* env, const ROCKSDB_NAMESPACE::ThreadStatus* thread_status) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "<init>", "(JBLjava/lang/String;Ljava/lang/String;BJB[JB)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jdb_name = + JniUtil::toJavaString(env, &(thread_status->db_name), true); + if (env->ExceptionCheck()) { + // an error occurred + return nullptr; + } + + jstring jcf_name = + JniUtil::toJavaString(env, &(thread_status->cf_name), true); + if (env->ExceptionCheck()) { + // an error occurred + env->DeleteLocalRef(jdb_name); + return nullptr; + } + + // long[] + const jsize len = static_cast<jsize>( + ROCKSDB_NAMESPACE::ThreadStatus::kNumOperationProperties); + jlongArray joperation_properties = env->NewLongArray(len); + if (joperation_properties == nullptr) { + // an exception occurred + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + return nullptr; + } + jboolean is_copy; + jlong* body = env->GetLongArrayElements(joperation_properties, &is_copy); + if (body == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + env->DeleteLocalRef(joperation_properties); + return nullptr; + } + for (jsize i = 0; i < len; ++i) { + body[i] = static_cast<jlong>(thread_status->op_properties[i]); + } + env->ReleaseLongArrayElements(joperation_properties, body, + is_copy == JNI_TRUE ? 0 : JNI_ABORT); + + jobject jthread_status = env->NewObject( + jclazz, mid, static_cast<jlong>(thread_status->thread_id), + ThreadTypeJni::toJavaThreadType(thread_status->thread_type), jdb_name, + jcf_name, + OperationTypeJni::toJavaOperationType(thread_status->operation_type), + static_cast<jlong>(thread_status->op_elapsed_micros), + OperationStageJni::toJavaOperationStage(thread_status->operation_stage), + joperation_properties, + StateTypeJni::toJavaStateType(thread_status->state_type)); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + env->DeleteLocalRef(joperation_properties); + return nullptr; + } + + // cleanup + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + env->DeleteLocalRef(joperation_properties); + + return jthread_status; + } +};
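A minimal sketch, assuming a ROCKSDB_NAMESPACE::Env* env_ptr, of how a binding such as rocksjni/env.cc or rocksjni/thread_status.cc might mirror Env::GetThreadList() output through ThreadStatusJni::construct:

// Hypothetical sketch only; the function name is an assumption.
jobjectArray example_thread_list(JNIEnv* env,
                                 ROCKSDB_NAMESPACE::Env* env_ptr) {
  std::vector<ROCKSDB_NAMESPACE::ThreadStatus> thread_list;
  ROCKSDB_NAMESPACE::Status s = env_ptr->GetThreadList(&thread_list);
  if (!s.ok()) {
    return nullptr;
  }
  const jsize len = static_cast<jsize>(thread_list.size());
  jobjectArray jarr = env->NewObjectArray(
      len, ROCKSDB_NAMESPACE::ThreadStatusJni::getJClass(env), nullptr);
  if (jarr == nullptr) {
    return nullptr;  // OutOfMemoryError pending
  }
  for (jsize i = 0; i < len; ++i) {
    jobject jts =
        ROCKSDB_NAMESPACE::ThreadStatusJni::construct(env, &thread_list[i]);
    if (jts == nullptr) {
      return nullptr;  // Java exception pending
    }
    env->SetObjectArrayElement(jarr, i, jts);
    env->DeleteLocalRef(jts);
  }
  return jarr;
}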
+ +// The portal class for org.rocksdb.CompactionStyle +class CompactionStyleJni { + public: + // Returns the equivalent org.rocksdb.CompactionStyle for the provided + // C++ ROCKSDB_NAMESPACE::CompactionStyle enum + static jbyte toJavaCompactionStyle( + const ROCKSDB_NAMESPACE::CompactionStyle& compaction_style) { + switch (compaction_style) { + case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleLevel: + return 0x0; + case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleUniversal: + return 0x1; + case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleFIFO: + return 0x2; + case ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleNone: + return 0x3; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionStyle enum for the + // provided Java org.rocksdb.CompactionStyle + static ROCKSDB_NAMESPACE::CompactionStyle toCppCompactionStyle( + jbyte jcompaction_style) { + switch (jcompaction_style) { + case 0x0: + return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleLevel; + case 0x1: + return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleUniversal; + case 0x2: + return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleFIFO; + case 0x3: + return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleNone; + default: + // undefined/default + return ROCKSDB_NAMESPACE::CompactionStyle::kCompactionStyleLevel; + } + } +}; + +// The portal class for org.rocksdb.CompactionReason +class CompactionReasonJni { + public: + // Returns the equivalent org.rocksdb.CompactionReason for the provided + // C++ ROCKSDB_NAMESPACE::CompactionReason enum + static jbyte toJavaCompactionReason( + const ROCKSDB_NAMESPACE::CompactionReason& compaction_reason) { + switch (compaction_reason) { + case ROCKSDB_NAMESPACE::CompactionReason::kUnknown: + return 0x0; + case ROCKSDB_NAMESPACE::CompactionReason::kLevelL0FilesNum: + return 0x1; + case ROCKSDB_NAMESPACE::CompactionReason::kLevelMaxLevelSize: + return 0x2; + case ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeAmplification: + return 0x3; + case ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeRatio: + return 0x4; + case ROCKSDB_NAMESPACE::CompactionReason::kUniversalSortedRunNum: + return 0x5; + case ROCKSDB_NAMESPACE::CompactionReason::kFIFOMaxSize: + return 0x6; + case ROCKSDB_NAMESPACE::CompactionReason::kFIFOReduceNumFiles: + return 0x7; + case ROCKSDB_NAMESPACE::CompactionReason::kFIFOTtl: + return 0x8; + case ROCKSDB_NAMESPACE::CompactionReason::kManualCompaction: + return 0x9; + case
ROCKSDB_NAMESPACE::CompactionReason::kFilesMarkedForCompaction: + return 0x10; + case ROCKSDB_NAMESPACE::CompactionReason::kBottommostFiles: + return 0x0A; + case ROCKSDB_NAMESPACE::CompactionReason::kTtl: + return 0x0B; + case ROCKSDB_NAMESPACE::CompactionReason::kFlush: + return 0x0C; + case ROCKSDB_NAMESPACE::CompactionReason::kExternalSstIngestion: + return 0x0D; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::CompactionReason enum for the + // provided Java org.rocksdb.CompactionReason + static ROCKSDB_NAMESPACE::CompactionReason toCppCompactionReason( + jbyte jcompaction_reason) { + switch (jcompaction_reason) { + case 0x0: + return ROCKSDB_NAMESPACE::CompactionReason::kUnknown; + case 0x1: + return ROCKSDB_NAMESPACE::CompactionReason::kLevelL0FilesNum; + case 0x2: + return ROCKSDB_NAMESPACE::CompactionReason::kLevelMaxLevelSize; + case 0x3: + return ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeAmplification; + case 0x4: + return ROCKSDB_NAMESPACE::CompactionReason::kUniversalSizeRatio; + case 0x5: + return ROCKSDB_NAMESPACE::CompactionReason::kUniversalSortedRunNum; + case 0x6: + return ROCKSDB_NAMESPACE::CompactionReason::kFIFOMaxSize; + case 0x7: + return ROCKSDB_NAMESPACE::CompactionReason::kFIFOReduceNumFiles; + case 0x8: + return ROCKSDB_NAMESPACE::CompactionReason::kFIFOTtl; + case 0x9: + return ROCKSDB_NAMESPACE::CompactionReason::kManualCompaction; + case 0x10: + return ROCKSDB_NAMESPACE::CompactionReason::kFilesMarkedForCompaction; + case 0x0A: + return ROCKSDB_NAMESPACE::CompactionReason::kBottommostFiles; + case 0x0B: + return ROCKSDB_NAMESPACE::CompactionReason::kTtl; + case 0x0C: + return ROCKSDB_NAMESPACE::CompactionReason::kFlush; + case 0x0D: + return ROCKSDB_NAMESPACE::CompactionReason::kExternalSstIngestion; + case 0x0E: + return ROCKSDB_NAMESPACE::CompactionReason::kPeriodicCompaction; + case 0x0F: + return ROCKSDB_NAMESPACE::CompactionReason::kChangeTemperature; + default: + // undefined/default + return ROCKSDB_NAMESPACE::CompactionReason::kUnknown; + } + } +}; + +// The portal class for org.rocksdb.WalFileType +class WalFileTypeJni { + public: + // Returns the equivalent org.rocksdb.WalFileType for the provided + // C++ ROCKSDB_NAMESPACE::WalFileType enum + static jbyte toJavaWalFileType( + const ROCKSDB_NAMESPACE::WalFileType& wal_file_type) { + switch (wal_file_type) { + case ROCKSDB_NAMESPACE::WalFileType::kArchivedLogFile: + return 0x0; + case ROCKSDB_NAMESPACE::WalFileType::kAliveLogFile: + return 0x1; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::WalFileType enum for the + // provided Java org.rocksdb.WalFileType + static ROCKSDB_NAMESPACE::WalFileType toCppWalFileType(jbyte jwal_file_type) { + switch (jwal_file_type) { + case 0x0: + return ROCKSDB_NAMESPACE::WalFileType::kArchivedLogFile; + case 0x1: + return ROCKSDB_NAMESPACE::WalFileType::kAliveLogFile; + default: + // undefined/default + return ROCKSDB_NAMESPACE::WalFileType::kAliveLogFile; + } + } +}; + +class LogFileJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.LogFile object. 
+ * + * @param env A pointer to the Java environment + * @param log_file A C++ log file object + * + * @return A reference to a Java org.rocksdb.LogFile object, or + * nullptr if an exception occurs + */ + static jobject fromCppLogFile(JNIEnv* env, + ROCKSDB_NAMESPACE::LogFile* log_file) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = + env->GetMethodID(jclazz, "<init>", "(Ljava/lang/String;JBJJ)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + std::string path_name = log_file->PathName(); + jstring jpath_name = + ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &path_name, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + return nullptr; + } + + jobject jlog_file = env->NewObject( + jclazz, mid, jpath_name, static_cast<jlong>(log_file->LogNumber()), + ROCKSDB_NAMESPACE::WalFileTypeJni::toJavaWalFileType(log_file->Type()), + static_cast<jlong>(log_file->StartSequence()), + static_cast<jlong>(log_file->SizeFileBytes())); + + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jpath_name); + return nullptr; + } + + // cleanup + env->DeleteLocalRef(jpath_name); + + return jlog_file; + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/LogFile"); + } +};
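For orientation, a hypothetical sketch of fromCppLogFile in use; GetSortedWalFiles() fills a VectorLogPtr (std::vector<std::unique_ptr<LogFile>>), and `db` plus the elided collection step are assumptions:

// Hypothetical sketch only; not the library's actual binding code.
void example_wal_files(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db) {
  ROCKSDB_NAMESPACE::VectorLogPtr wal_files;
  ROCKSDB_NAMESPACE::Status s = db->GetSortedWalFiles(wal_files);
  if (!s.ok()) {
    return;
  }
  for (auto& wal_file : wal_files) {
    jobject jlog_file =
        ROCKSDB_NAMESPACE::LogFileJni::fromCppLogFile(env, wal_file.get());
    if (jlog_file == nullptr) {
      return;  // Java exception pending
    }
    // ... collect jlog_file, e.g. into a java.util.List ...
    env->DeleteLocalRef(jlog_file);
  }
}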
+ +class LiveFileMetaDataJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.LiveFileMetaData object. + * + * @param env A pointer to the Java environment + * @param live_file_meta_data A C++ live file meta data object + * + * @return A reference to a Java org.rocksdb.LiveFileMetaData object, or + * nullptr if an exception occurs + */ + static jobject fromCppLiveFileMetaData( + JNIEnv* env, ROCKSDB_NAMESPACE::LiveFileMetaData* live_file_meta_data) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "<init>", + "([BILjava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jbyteArray jcolumn_family_name = ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, live_file_meta_data->column_family_name); + if (jcolumn_family_name == nullptr) { + // exception occurred creating java byte array + return nullptr; + } + + jstring jfile_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &live_file_meta_data->name, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + return nullptr; + } + + jstring jpath = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &live_file_meta_data->db_path, true); + if (env->ExceptionCheck()) { + // exception occurred creating java string + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfile_name); + return nullptr; + } + + jbyteArray jsmallest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, live_file_meta_data->smallestkey); + if (jsmallest_key == nullptr) { + // exception occurred creating java byte array + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + return nullptr; + } + + jbyteArray jlargest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, live_file_meta_data->largestkey); + if (jlargest_key == nullptr) { + // exception occurred creating java byte array + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + env->DeleteLocalRef(jsmallest_key); + return nullptr; + } + + jobject jlive_file_meta_data = env->NewObject( + jclazz, mid, jcolumn_family_name, + static_cast<jint>(live_file_meta_data->level), jfile_name, jpath, + static_cast<jlong>(live_file_meta_data->size), + static_cast<jlong>(live_file_meta_data->smallest_seqno), + static_cast<jlong>(live_file_meta_data->largest_seqno), jsmallest_key, + jlargest_key, + static_cast<jlong>(live_file_meta_data->num_reads_sampled), + static_cast<jboolean>(live_file_meta_data->being_compacted), + static_cast<jlong>(live_file_meta_data->num_entries), + static_cast<jlong>(live_file_meta_data->num_deletions)); + + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + env->DeleteLocalRef(jsmallest_key); + env->DeleteLocalRef(jlargest_key); + return nullptr; + } + + // cleanup + env->DeleteLocalRef(jcolumn_family_name); + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + env->DeleteLocalRef(jsmallest_key); + env->DeleteLocalRef(jlargest_key); + + return jlive_file_meta_data; + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/LiveFileMetaData"); + } +};
+ +class SstFileMetaDataJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.SstFileMetaData object. + * + * @param env A pointer to the Java environment + * @param sst_file_meta_data A C++ sst file meta data object + * + * @return A reference to a Java org.rocksdb.SstFileMetaData object, or + * nullptr if an exception occurs + */ + static jobject fromCppSstFileMetaData( + JNIEnv* env, + const ROCKSDB_NAMESPACE::SstFileMetaData* sst_file_meta_data) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID( + jclazz, "<init>", "(Ljava/lang/String;Ljava/lang/String;JJJ[B[BJZJJ)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jstring jfile_name = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &sst_file_meta_data->name, true); + if (jfile_name == nullptr) { + // exception occurred creating java string + return nullptr; + } + + jstring jpath = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &sst_file_meta_data->db_path, true); + if (jpath == nullptr) { + // exception occurred creating java string + env->DeleteLocalRef(jfile_name); + return nullptr; + } + + jbyteArray jsmallest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, sst_file_meta_data->smallestkey); + if (jsmallest_key == nullptr) { + // exception occurred creating java byte array + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + return nullptr; + } + + jbyteArray jlargest_key = ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, sst_file_meta_data->largestkey); + if (jlargest_key == nullptr) { + // exception occurred creating java byte array + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + env->DeleteLocalRef(jsmallest_key); + return nullptr; + } + + jobject jsst_file_meta_data = env->NewObject( + jclazz, mid, jfile_name, jpath, + static_cast<jlong>(sst_file_meta_data->size), + static_cast<jlong>(sst_file_meta_data->smallest_seqno), + static_cast<jlong>(sst_file_meta_data->largest_seqno), jsmallest_key, + jlargest_key, static_cast<jlong>(sst_file_meta_data->num_reads_sampled), + static_cast<jboolean>(sst_file_meta_data->being_compacted), + static_cast<jlong>(sst_file_meta_data->num_entries), + static_cast<jlong>(sst_file_meta_data->num_deletions)); + + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + env->DeleteLocalRef(jsmallest_key); + env->DeleteLocalRef(jlargest_key); + return nullptr; + } + + // cleanup + env->DeleteLocalRef(jfile_name); + env->DeleteLocalRef(jpath); + env->DeleteLocalRef(jsmallest_key); + env->DeleteLocalRef(jlargest_key); + + return jsst_file_meta_data; + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/SstFileMetaData"); + } +}; + +class LevelMetaDataJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.LevelMetaData object. + * + * @param env A pointer to the Java environment + * @param level_meta_data A C++ level meta data object + * + * @return A reference to a Java org.rocksdb.LevelMetaData object, or + * nullptr if an exception occurs + */ + static jobject fromCppLevelMetaData( + JNIEnv* env, const ROCKSDB_NAMESPACE::LevelMetaData* level_meta_data) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID(jclazz, "<init>", + "(IJ[Lorg/rocksdb/SstFileMetaData;)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + const jsize jlen = static_cast<jsize>(level_meta_data->files.size()); + jobjectArray jfiles = + env->NewObjectArray(jlen, SstFileMetaDataJni::getJClass(env), nullptr); + if (jfiles == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jsize i = 0; + for (auto it = level_meta_data->files.begin(); + it != level_meta_data->files.end(); ++it) { + jobject jfile = SstFileMetaDataJni::fromCppSstFileMetaData(env, &(*it)); + if (jfile == nullptr) { + // exception occurred + env->DeleteLocalRef(jfiles); + return nullptr; + } + env->SetObjectArrayElement(jfiles, i++, jfile); + } + + jobject jlevel_meta_data = + env->NewObject(jclazz, mid, static_cast<jint>(level_meta_data->level), + static_cast<jlong>(level_meta_data->size), jfiles); + + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jfiles); + return nullptr; + } + + // cleanup + env->DeleteLocalRef(jfiles); + + return jlevel_meta_data; + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/LevelMetaData"); + } +}; + +class ColumnFamilyMetaDataJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.ColumnFamilyMetaData object.
+ * + * @param env A pointer to the Java environment + * @param column_family_meta_data A C++ column family meta data object + * + * @return A reference to a Java org.rocksdb.ColumnFamilyMetaData object, or + * nullptr if an exception occurs + */ + static jobject fromCppColumnFamilyMetaData( + JNIEnv* env, + const ROCKSDB_NAMESPACE::ColumnFamilyMetaData* column_family_meta_data) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + jmethodID mid = env->GetMethodID(jclazz, "<init>", + "(JJ[B[Lorg/rocksdb/LevelMetaData;)V"); + if (mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return nullptr; + } + + jbyteArray jname = ROCKSDB_NAMESPACE::JniUtil::copyBytes( + env, column_family_meta_data->name); + if (jname == nullptr) { + // exception occurred creating java byte array + return nullptr; + } + + const jsize jlen = + static_cast<jsize>(column_family_meta_data->levels.size()); + jobjectArray jlevels = + env->NewObjectArray(jlen, LevelMetaDataJni::getJClass(env), nullptr); + if (jlevels == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jname); + return nullptr; + } + + jsize i = 0; + for (auto it = column_family_meta_data->levels.begin(); + it != column_family_meta_data->levels.end(); ++it) { + jobject jlevel = LevelMetaDataJni::fromCppLevelMetaData(env, &(*it)); + if (jlevel == nullptr) { + // exception occurred + env->DeleteLocalRef(jname); + env->DeleteLocalRef(jlevels); + return nullptr; + } + env->SetObjectArrayElement(jlevels, i++, jlevel); + } + + jobject jcolumn_family_meta_data = env->NewObject( + jclazz, mid, static_cast<jlong>(column_family_meta_data->size), + static_cast<jlong>(column_family_meta_data->file_count), jname, jlevels); + + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jname); + env->DeleteLocalRef(jlevels); + return nullptr; + } + + // cleanup + env->DeleteLocalRef(jname); + env->DeleteLocalRef(jlevels); + + return jcolumn_family_meta_data; + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/ColumnFamilyMetaData"); + } +};
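A minimal usage sketch, assuming an open ROCKSDB_NAMESPACE::DB* db; GetColumnFamilyMetaData() fills the C++ struct that fromCppColumnFamilyMetaData mirrors into Java:

// Hypothetical sketch only; the function name is an assumption.
jobject example_cf_meta(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db) {
  ROCKSDB_NAMESPACE::ColumnFamilyMetaData cf_meta;
  db->GetColumnFamilyMetaData(&cf_meta);  // default column family
  return ROCKSDB_NAMESPACE::ColumnFamilyMetaDataJni::
      fromCppColumnFamilyMetaData(env, &cf_meta);
}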
+ +// The portal class for org.rocksdb.AbstractTraceWriter +class AbstractTraceWriterJni + : public RocksDBNativeClass< + const ROCKSDB_NAMESPACE::TraceWriterJniCallback*, + AbstractTraceWriterJni> { + public: + /** + * Get the Java Class org.rocksdb.AbstractTraceWriter + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractTraceWriter"); + } + + /** + * Get the Java Method: AbstractTraceWriter#write + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getWriteProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "writeProxy", "(J)S"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractTraceWriter#closeWriter + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getCloseWriterProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "closeWriterProxy", "()S"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractTraceWriter#getFileSize + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getGetFileSizeMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "getFileSize", "()J"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.AbstractWalFilter +class AbstractWalFilterJni + : public RocksDBNativeClass< + const ROCKSDB_NAMESPACE::WalFilterJniCallback*, + AbstractWalFilterJni> { + public: + /** + * Get the Java Class org.rocksdb.AbstractWalFilter + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/AbstractWalFilter"); + } + + /** + * Get the Java Method: AbstractWalFilter#columnFamilyLogNumberMap + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getColumnFamilyLogNumberMapMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "columnFamilyLogNumberMap", + "(Ljava/util/Map;Ljava/util/Map;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractWalFilter#logRecordFoundProxy + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getLogRecordFoundProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = env->GetMethodID(jclazz, "logRecordFoundProxy", + "(JLjava/lang/String;JJ)S"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractWalFilter#name + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID or nullptr if the class or method id could not + * be retrieved + */ + static jmethodID getNameMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + + static jmethodID mid = + env->GetMethodID(jclazz, "name", "()Ljava/lang/String;"); + assert(mid != nullptr); + return mid; + } +};
+ +// The portal class for org.rocksdb.WalProcessingOption +class WalProcessingOptionJni { + public: + // Returns the equivalent org.rocksdb.WalProcessingOption for the provided + // C++ ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption enum + static jbyte toJavaWalProcessingOption( + const ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption& + wal_processing_option) { + switch (wal_processing_option) { + case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption:: + kContinueProcessing: + return 0x0; + case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption:: + kIgnoreCurrentRecord: + return 0x1; + case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kStopReplay: + return 0x2; + case ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kCorruptedRecord: + return 0x3; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ + // ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption enum for the provided + // Java org.rocksdb.WalProcessingOption + static ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption + toCppWalProcessingOption(jbyte jwal_processing_option) { + switch (jwal_processing_option) { + case 0x0: + return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption:: + kContinueProcessing; + case 0x1: + return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption:: + kIgnoreCurrentRecord; + case 0x2: + return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kStopReplay; + case 0x3: + return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption:: + kCorruptedRecord; + default: + // undefined/default + return ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption:: + kCorruptedRecord; + } + } +}; + +// The portal class for org.rocksdb.ReusedSynchronisationType +class ReusedSynchronisationTypeJni { + public: + // Returns the equivalent org.rocksdb.ReusedSynchronisationType for the + // provided C++ ROCKSDB_NAMESPACE::ReusedSynchronisationType enum + static jbyte toJavaReusedSynchronisationType( + const ROCKSDB_NAMESPACE::ReusedSynchronisationType& + reused_synchronisation_type) { + switch (reused_synchronisation_type) { + case ROCKSDB_NAMESPACE::ReusedSynchronisationType::MUTEX: + return 0x0; + case ROCKSDB_NAMESPACE::ReusedSynchronisationType::ADAPTIVE_MUTEX: + return 0x1; + case ROCKSDB_NAMESPACE::ReusedSynchronisationType::THREAD_LOCAL: + return 0x2; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::ReusedSynchronisationType + // enum for the provided Java org.rocksdb.ReusedSynchronisationType + static ROCKSDB_NAMESPACE::ReusedSynchronisationType + toCppReusedSynchronisationType(jbyte reused_synchronisation_type) { + switch (reused_synchronisation_type) { + case 0x0: + return ROCKSDB_NAMESPACE::ReusedSynchronisationType::MUTEX; + case 0x1: + return ROCKSDB_NAMESPACE::ReusedSynchronisationType::ADAPTIVE_MUTEX; + case 0x2: + return ROCKSDB_NAMESPACE::ReusedSynchronisationType::THREAD_LOCAL; + default: + // undefined/default + return ROCKSDB_NAMESPACE::ReusedSynchronisationType::ADAPTIVE_MUTEX; + } + } +};
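The enum portal classes above all share one convention: values cross the JNI boundary as a jbyte and are decoded with the matching toCpp* function, which falls back to a documented default for unknown bytes. A round-trip sketch (hypothetical, requires <cassert>):

// Hypothetical round-trip through the jbyte encoding.
void example_enum_round_trip() {
  const jbyte jopt =
      ROCKSDB_NAMESPACE::WalProcessingOptionJni::toJavaWalProcessingOption(
          ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kStopReplay);
  const auto opt =
      ROCKSDB_NAMESPACE::WalProcessingOptionJni::toCppWalProcessingOption(jopt);
  assert(opt == ROCKSDB_NAMESPACE::WalFilter::WalProcessingOption::kStopReplay);
}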
+ +// The portal class for org.rocksdb.SanityLevel +class SanityLevelJni { + public: + // Returns the equivalent org.rocksdb.SanityLevel for the provided + // C++ ROCKSDB_NAMESPACE::ConfigOptions::SanityLevel enum + static jbyte toJavaSanityLevel( + const ROCKSDB_NAMESPACE::ConfigOptions::SanityLevel& sanity_level) { + switch (sanity_level) { + case ROCKSDB_NAMESPACE::ConfigOptions::SanityLevel::kSanityLevelNone: + return 0x0; + case ROCKSDB_NAMESPACE::ConfigOptions::SanityLevel:: + kSanityLevelLooselyCompatible: + return 0x1; + case ROCKSDB_NAMESPACE::ConfigOptions::SanityLevel:: + kSanityLevelExactMatch: + return -0x01; + default: + return -0x01; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::ConfigOptions::SanityLevel + // enum for the provided Java org.rocksdb.SanityLevel + static ROCKSDB_NAMESPACE::ConfigOptions::SanityLevel toCppSanityLevel( + jbyte sanity_level) { + switch (sanity_level) { + case 0x0: + return ROCKSDB_NAMESPACE::ConfigOptions::kSanityLevelNone; + case 0x1: + return ROCKSDB_NAMESPACE::ConfigOptions::kSanityLevelLooselyCompatible; + default: + // undefined/default + return ROCKSDB_NAMESPACE::ConfigOptions::kSanityLevelExactMatch; + } + } +}; + +// The portal class for org.rocksdb.PrepopulateBlobCache +class PrepopulateBlobCacheJni { + public: + // Returns the equivalent org.rocksdb.PrepopulateBlobCache for the provided + // C++ ROCKSDB_NAMESPACE::PrepopulateBlobCache enum + static jbyte toJavaPrepopulateBlobCache( + ROCKSDB_NAMESPACE::PrepopulateBlobCache prepopulate_blob_cache) { + switch (prepopulate_blob_cache) { + case ROCKSDB_NAMESPACE::PrepopulateBlobCache::kDisable: + return 0x0; + case ROCKSDB_NAMESPACE::PrepopulateBlobCache::kFlushOnly: + return 0x1; + default: + return 0x7F; // undefined + } + } + + // Returns the equivalent C++ ROCKSDB_NAMESPACE::PrepopulateBlobCache enum for + // the provided Java org.rocksdb.PrepopulateBlobCache + static ROCKSDB_NAMESPACE::PrepopulateBlobCache toCppPrepopulateBlobCache( + jbyte jprepopulate_blob_cache) { + switch (jprepopulate_blob_cache) { + case 0x0: + return ROCKSDB_NAMESPACE::PrepopulateBlobCache::kDisable; + case 0x1: + return ROCKSDB_NAMESPACE::PrepopulateBlobCache::kFlushOnly; + case 0x7F: + default: + // undefined/default + return ROCKSDB_NAMESPACE::PrepopulateBlobCache::kDisable; + } + } +}; + +// The portal class for org.rocksdb.AbstractListener.EnabledEventCallback +class EnabledEventCallbackJni { + public: + // Returns the set of equivalent C++ + // ROCKSDB_NAMESPACE::EnabledEventCallback enums for the provided + // Java jenabled_event_callback_values + static std::set<ROCKSDB_NAMESPACE::EnabledEventCallback> + toCppEnabledEventCallbacks(jlong jenabled_event_callback_values) { + std::set<ROCKSDB_NAMESPACE::EnabledEventCallback> enabled_event_callbacks; + for (size_t i = 0; i < EnabledEventCallback::NUM_ENABLED_EVENT_CALLBACK; + ++i) { + if (((1ULL << i) & jenabled_event_callback_values) > 0) { + enabled_event_callbacks.emplace(static_cast<EnabledEventCallback>(i)); + } + } + return enabled_event_callbacks; + } +}; + +// The portal class for org.rocksdb.AbstractEventListener +class AbstractEventListenerJni + : public RocksDBNativeClass< + const ROCKSDB_NAMESPACE::EventListenerJniCallback*, + AbstractEventListenerJni> { + public: + /** + * Get the Java Class org.rocksdb.AbstractEventListener + * + * @param env A pointer to the Java environment + * + * @return The Java Class or nullptr if one of the + * ClassFormatError, ClassCircularityError, NoClassDefFoundError, + * OutOfMemoryError or ExceptionInInitializerError exceptions is thrown + */ + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractEventListener"); + } + + /** + * Get the Java Method: AbstractEventListener#onFlushCompletedProxy + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFlushCompletedProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID(jclazz, "onFlushCompletedProxy", + "(JLorg/rocksdb/FlushJobInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFlushBeginProxy + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFlushBeginProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID(jclazz, "onFlushBeginProxy", + "(JLorg/rocksdb/FlushJobInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the
Java Method: AbstractEventListener#onTableFileDeleted + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnTableFileDeletedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onTableFileDeleted", "(Lorg/rocksdb/TableFileDeletionInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onCompactionBeginProxy + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnCompactionBeginProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = + env->GetMethodID(jclazz, "onCompactionBeginProxy", + "(JLorg/rocksdb/CompactionJobInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onCompactionCompletedProxy + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnCompactionCompletedProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = + env->GetMethodID(jclazz, "onCompactionCompletedProxy", + "(JLorg/rocksdb/CompactionJobInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onTableFileCreated + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnTableFileCreatedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onTableFileCreated", "(Lorg/rocksdb/TableFileCreationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onTableFileCreationStarted + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnTableFileCreationStartedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = + env->GetMethodID(jclazz, "onTableFileCreationStarted", + "(Lorg/rocksdb/TableFileCreationBriefInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onMemTableSealed + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnMemTableSealedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID(jclazz, "onMemTableSealed", + "(Lorg/rocksdb/MemTableInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: + * AbstractEventListener#onColumnFamilyHandleDeletionStarted + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnColumnFamilyHandleDeletionStartedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = + env->GetMethodID(jclazz, "onColumnFamilyHandleDeletionStarted", + "(Lorg/rocksdb/ColumnFamilyHandle;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onExternalFileIngestedProxy + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnExternalFileIngestedProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); 
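+    // Note the caching idiom used by every accessor in this class: the
+    // jmethodID is resolved once and kept in a function-local static, which
+    // is safe because a jmethodID remains valid for as long as its defining
+    // class stays loaded, and it avoids a GetMethodID lookup per callback.
+    // A minimal sketch of the idiom, with a hypothetical class and method:
+    //
+    //   static jmethodID getBarMethodId(JNIEnv* env) {
+    //     jclass jclazz = env->FindClass("org/rocksdb/Foo");
+    //     if (jclazz == nullptr) {
+    //       return nullptr;  // ClassNotFoundError is pending on the JVM
+    //     }
+    //     static jmethodID mid = env->GetMethodID(jclazz, "bar", "()V");
+    //     return mid;
+    //   }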
+ assert(jclazz != nullptr); + static jmethodID mid = + env->GetMethodID(jclazz, "onExternalFileIngestedProxy", + "(JLorg/rocksdb/ExternalFileIngestionInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onBackgroundError + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnBackgroundErrorProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID(jclazz, "onBackgroundErrorProxy", + "(BLorg/rocksdb/Status;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onStallConditionsChanged + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnStallConditionsChangedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID(jclazz, "onStallConditionsChanged", + "(Lorg/rocksdb/WriteStallInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFileReadFinish + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFileReadFinishMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onFileReadFinish", "(Lorg/rocksdb/FileOperationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFileWriteFinish + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFileWriteFinishMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onFileWriteFinish", "(Lorg/rocksdb/FileOperationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFileFlushFinish + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFileFlushFinishMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onFileFlushFinish", "(Lorg/rocksdb/FileOperationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFileSyncFinish + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFileSyncFinishMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onFileSyncFinish", "(Lorg/rocksdb/FileOperationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFileRangeSyncFinish + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFileRangeSyncFinishMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onFileRangeSyncFinish", "(Lorg/rocksdb/FileOperationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFileTruncateFinish + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static 
jmethodID getOnFileTruncateFinishMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onFileTruncateFinish", "(Lorg/rocksdb/FileOperationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onFileCloseFinish + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnFileCloseFinishMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID( + jclazz, "onFileCloseFinish", "(Lorg/rocksdb/FileOperationInfo;)V"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#shouldBeNotifiedOnFileIO + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getShouldBeNotifiedOnFileIOMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = + env->GetMethodID(jclazz, "shouldBeNotifiedOnFileIO", "()Z"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onErrorRecoveryBeginProxy + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnErrorRecoveryBeginProxyMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID(jclazz, "onErrorRecoveryBeginProxy", + "(BLorg/rocksdb/Status;)Z"); + assert(mid != nullptr); + return mid; + } + + /** + * Get the Java Method: AbstractEventListener#onErrorRecoveryCompleted + * + * @param env A pointer to the Java environment + * + * @return The Java Method ID + */ + static jmethodID getOnErrorRecoveryCompletedMethodId(JNIEnv* env) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID mid = env->GetMethodID(jclazz, "onErrorRecoveryCompleted", + "(Lorg/rocksdb/Status;)V"); + assert(mid != nullptr); + return mid; + } +}; + +class FlushJobInfoJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.FlushJobInfo object. 
+ * + * @param env A pointer to the Java environment + * @param flush_job_info A Cpp flush job info object + * + * @return A reference to a Java org.rocksdb.FlushJobInfo object, or + * nullptr if an an exception occurs + */ + static jobject fromCppFlushJobInfo( + JNIEnv* env, const ROCKSDB_NAMESPACE::FlushJobInfo* flush_job_info) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jcf_name = JniUtil::toJavaString(env, &flush_job_info->cf_name); + if (env->ExceptionCheck()) { + return nullptr; + } + jstring jfile_path = JniUtil::toJavaString(env, &flush_job_info->file_path); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jfile_path); + return nullptr; + } + jobject jtable_properties = TablePropertiesJni::fromCppTableProperties( + env, flush_job_info->table_properties); + if (jtable_properties == nullptr) { + env->DeleteLocalRef(jcf_name); + env->DeleteLocalRef(jfile_path); + return nullptr; + } + return env->NewObject( + jclazz, ctor, static_cast(flush_job_info->cf_id), jcf_name, + jfile_path, static_cast(flush_job_info->thread_id), + static_cast(flush_job_info->job_id), + static_cast(flush_job_info->triggered_writes_slowdown), + static_cast(flush_job_info->triggered_writes_stop), + static_cast(flush_job_info->smallest_seqno), + static_cast(flush_job_info->largest_seqno), jtable_properties, + static_cast(flush_job_info->flush_reason)); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/FlushJobInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID(clazz, "", + "(JLjava/lang/String;Ljava/lang/String;JIZZJJLorg/" + "rocksdb/TableProperties;B)V"); + } +}; + +class TableFileDeletionInfoJni : public JavaClass { + public: + /** + * Create a new Java org.rocksdb.TableFileDeletionInfo object. 
+ * + * @param env A pointer to the Java environment + * @param file_del_info A Cpp table file deletion info object + * + * @return A reference to a Java org.rocksdb.TableFileDeletionInfo object, or + * nullptr if an an exception occurs + */ + static jobject fromCppTableFileDeletionInfo( + JNIEnv* env, + const ROCKSDB_NAMESPACE::TableFileDeletionInfo* file_del_info) { + jclass jclazz = getJClass(env); + if (jclazz == nullptr) { + // exception occurred accessing class + return nullptr; + } + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jdb_name = JniUtil::toJavaString(env, &file_del_info->db_name); + if (env->ExceptionCheck()) { + return nullptr; + } + jobject jstatus = StatusJni::construct(env, file_del_info->status); + if (jstatus == nullptr) { + env->DeleteLocalRef(jdb_name); + return nullptr; + } + return env->NewObject(jclazz, ctor, jdb_name, + JniUtil::toJavaString(env, &file_del_info->file_path), + static_cast(file_del_info->job_id), jstatus); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TableFileDeletionInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID( + clazz, "", + "(Ljava/lang/String;Ljava/lang/String;ILorg/rocksdb/Status;)V"); + } +}; + +class CompactionJobInfoJni : public JavaClass { + public: + static jobject fromCppCompactionJobInfo( + JNIEnv* env, + const ROCKSDB_NAMESPACE::CompactionJobInfo* compaction_job_info) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + return env->NewObject(jclazz, ctor, + GET_CPLUSPLUS_POINTER(compaction_job_info)); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/CompactionJobInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID(clazz, "", "(J)V"); + } +}; + +class TableFileCreationInfoJni : public JavaClass { + public: + static jobject fromCppTableFileCreationInfo( + JNIEnv* env, const ROCKSDB_NAMESPACE::TableFileCreationInfo* info) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jdb_name = JniUtil::toJavaString(env, &info->db_name); + if (env->ExceptionCheck()) { + return nullptr; + } + jstring jcf_name = JniUtil::toJavaString(env, &info->cf_name); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jdb_name); + return nullptr; + } + jstring jfile_path = JniUtil::toJavaString(env, &info->file_path); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + return nullptr; + } + jobject jtable_properties = + TablePropertiesJni::fromCppTableProperties(env, info->table_properties); + if (jtable_properties == nullptr) { + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + return nullptr; + } + jobject jstatus = StatusJni::construct(env, info->status); + if (jstatus == nullptr) { + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + env->DeleteLocalRef(jtable_properties); + return nullptr; + } + return env->NewObject(jclazz, ctor, static_cast(info->file_size), + jtable_properties, jstatus, jdb_name, jcf_name, + jfile_path, static_cast(info->job_id), + static_cast(info->reason)); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TableFileCreationInfo"); 
+ } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID( + clazz, "", + "(JLorg/rocksdb/TableProperties;Lorg/rocksdb/Status;Ljava/lang/" + "String;Ljava/lang/String;Ljava/lang/String;IB)V"); + } +}; + +class TableFileCreationBriefInfoJni : public JavaClass { + public: + static jobject fromCppTableFileCreationBriefInfo( + JNIEnv* env, const ROCKSDB_NAMESPACE::TableFileCreationBriefInfo* info) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jdb_name = JniUtil::toJavaString(env, &info->db_name); + if (env->ExceptionCheck()) { + return nullptr; + } + jstring jcf_name = JniUtil::toJavaString(env, &info->cf_name); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jdb_name); + return nullptr; + } + jstring jfile_path = JniUtil::toJavaString(env, &info->file_path); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jdb_name); + env->DeleteLocalRef(jcf_name); + return nullptr; + } + return env->NewObject(jclazz, ctor, jdb_name, jcf_name, jfile_path, + static_cast(info->job_id), + static_cast(info->reason)); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/TableFileCreationBriefInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID( + clazz, "", + "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;IB)V"); + } +}; + +class MemTableInfoJni : public JavaClass { + public: + static jobject fromCppMemTableInfo( + JNIEnv* env, const ROCKSDB_NAMESPACE::MemTableInfo* info) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jcf_name = JniUtil::toJavaString(env, &info->cf_name); + if (env->ExceptionCheck()) { + return nullptr; + } + return env->NewObject(jclazz, ctor, jcf_name, + static_cast(info->first_seqno), + static_cast(info->earliest_seqno), + static_cast(info->num_entries), + static_cast(info->num_deletes)); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/MemTableInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID(clazz, "", "(Ljava/lang/String;JJJJ)V"); + } +}; + +class ExternalFileIngestionInfoJni : public JavaClass { + public: + static jobject fromCppExternalFileIngestionInfo( + JNIEnv* env, const ROCKSDB_NAMESPACE::ExternalFileIngestionInfo* info) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jcf_name = JniUtil::toJavaString(env, &info->cf_name); + if (env->ExceptionCheck()) { + return nullptr; + } + jstring jexternal_file_path = + JniUtil::toJavaString(env, &info->external_file_path); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jcf_name); + return nullptr; + } + jstring jinternal_file_path = + JniUtil::toJavaString(env, &info->internal_file_path); + if (env->ExceptionCheck()) { + env->DeleteLocalRef(jcf_name); + env->DeleteLocalRef(jexternal_file_path); + return nullptr; + } + jobject jtable_properties = + TablePropertiesJni::fromCppTableProperties(env, info->table_properties); + if (jtable_properties == nullptr) { + env->DeleteLocalRef(jcf_name); + env->DeleteLocalRef(jexternal_file_path); + env->DeleteLocalRef(jinternal_file_path); + return nullptr; + } + return env->NewObject( + 
jclazz, ctor, jcf_name, jexternal_file_path, jinternal_file_path, + static_cast(info->global_seqno), jtable_properties); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/ExternalFileIngestionInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID(clazz, "", + "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/" + "String;JLorg/rocksdb/TableProperties;)V"); + } +}; + +class WriteStallInfoJni : public JavaClass { + public: + static jobject fromCppWriteStallInfo( + JNIEnv* env, const ROCKSDB_NAMESPACE::WriteStallInfo* info) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jcf_name = JniUtil::toJavaString(env, &info->cf_name); + if (env->ExceptionCheck()) { + return nullptr; + } + return env->NewObject(jclazz, ctor, jcf_name, + static_cast(info->condition.cur), + static_cast(info->condition.prev)); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/WriteStallInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID(clazz, "", "(Ljava/lang/String;BB)V"); + } +}; + +class FileOperationInfoJni : public JavaClass { + public: + static jobject fromCppFileOperationInfo( + JNIEnv* env, const ROCKSDB_NAMESPACE::FileOperationInfo* info) { + jclass jclazz = getJClass(env); + assert(jclazz != nullptr); + static jmethodID ctor = getConstructorMethodId(env, jclazz); + assert(ctor != nullptr); + jstring jpath = JniUtil::toJavaString(env, &info->path); + if (env->ExceptionCheck()) { + return nullptr; + } + jobject jstatus = StatusJni::construct(env, info->status); + if (jstatus == nullptr) { + env->DeleteLocalRef(jpath); + return nullptr; + } + return env->NewObject( + jclazz, ctor, jpath, static_cast(info->offset), + static_cast(info->length), + static_cast(info->start_ts.time_since_epoch().count()), + static_cast(info->duration.count()), jstatus); + } + + static jclass getJClass(JNIEnv* env) { + return JavaClass::getJClass(env, "org/rocksdb/FileOperationInfo"); + } + + static jmethodID getConstructorMethodId(JNIEnv* env, jclass clazz) { + return env->GetMethodID(clazz, "", + "(Ljava/lang/String;JJJJLorg/rocksdb/Status;)V"); + } +}; +} // namespace ROCKSDB_NAMESPACE +#endif // JAVA_ROCKSJNI_PORTAL_H_ diff --git a/src/rocksdb/java/rocksjni/ratelimiterjni.cc b/src/rocksdb/java/rocksjni/ratelimiterjni.cc new file mode 100644 index 000000000..7a17f367e --- /dev/null +++ b/src/rocksdb/java/rocksjni/ratelimiterjni.cc @@ -0,0 +1,128 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for RateLimiter. 
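+//
+// The functions below follow the usual RocksJava handle pattern: the native
+// object (here a heap-allocated std::shared_ptr<RateLimiter>) is created on
+// the C++ side, its address is packed into a jlong via GET_CPLUSPLUS_POINTER,
+// and later calls reinterpret_cast that jlong back. A minimal sketch of the
+// round trip (hypothetical helper names, for illustration only):
+//
+//   jlong create() {
+//     auto* sptr = new std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>(
+//         ROCKSDB_NAMESPACE::NewGenericRateLimiter(10 * 1024 * 1024));
+//     return GET_CPLUSPLUS_POINTER(sptr);
+//   }
+//   void destroy(jlong handle) {
+//     delete reinterpret_cast<
+//         std::shared_ptr<ROCKSDB_NAMESPACE::RateLimiter>*>(handle);
+//   }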
+ +#include "include/org_rocksdb_RateLimiter.h" +#include "rocksdb/rate_limiter.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_RateLimiter + * Method: newRateLimiterHandle + * Signature: (JJIBZ)J + */ +jlong Java_org_rocksdb_RateLimiter_newRateLimiterHandle( + JNIEnv* /*env*/, jclass /*jclazz*/, jlong jrate_bytes_per_second, + jlong jrefill_period_micros, jint jfairness, jbyte jrate_limiter_mode, + jboolean jauto_tune) { + auto rate_limiter_mode = + ROCKSDB_NAMESPACE::RateLimiterModeJni::toCppRateLimiterMode( + jrate_limiter_mode); + auto* sptr_rate_limiter = new std::shared_ptr( + ROCKSDB_NAMESPACE::NewGenericRateLimiter( + static_cast(jrate_bytes_per_second), + static_cast(jrefill_period_micros), + static_cast(jfairness), rate_limiter_mode, jauto_tune)); + + return GET_CPLUSPLUS_POINTER(sptr_rate_limiter); +} + +/* + * Class: org_rocksdb_RateLimiter + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RateLimiter_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* handle = + reinterpret_cast*>( + jhandle); + delete handle; // delete std::shared_ptr +} + +/* + * Class: org_rocksdb_RateLimiter + * Method: setBytesPerSecond + * Signature: (JJ)V + */ +void Java_org_rocksdb_RateLimiter_setBytesPerSecond(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle, + jlong jbytes_per_second) { + reinterpret_cast*>(handle) + ->get() + ->SetBytesPerSecond(jbytes_per_second); +} + +/* + * Class: org_rocksdb_RateLimiter + * Method: getBytesPerSecond + * Signature: (J)J + */ +jlong Java_org_rocksdb_RateLimiter_getBytesPerSecond(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast*>( + handle) + ->get() + ->GetBytesPerSecond(); +} + +/* + * Class: org_rocksdb_RateLimiter + * Method: request + * Signature: (JJ)V + */ +void Java_org_rocksdb_RateLimiter_request(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle, jlong jbytes) { + reinterpret_cast*>(handle) + ->get() + ->Request(jbytes, ROCKSDB_NAMESPACE::Env::IO_TOTAL); +} + +/* + * Class: org_rocksdb_RateLimiter + * Method: getSingleBurstBytes + * Signature: (J)J + */ +jlong Java_org_rocksdb_RateLimiter_getSingleBurstBytes(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast*>( + handle) + ->get() + ->GetSingleBurstBytes(); +} + +/* + * Class: org_rocksdb_RateLimiter + * Method: getTotalBytesThrough + * Signature: (J)J + */ +jlong Java_org_rocksdb_RateLimiter_getTotalBytesThrough(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast*>( + handle) + ->get() + ->GetTotalBytesThrough(); +} + +/* + * Class: org_rocksdb_RateLimiter + * Method: getTotalRequests + * Signature: (J)J + */ +jlong Java_org_rocksdb_RateLimiter_getTotalRequests(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast*>( + handle) + ->get() + ->GetTotalRequests(); +} diff --git a/src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc b/src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc new file mode 100644 index 000000000..c0b09e151 --- /dev/null +++ b/src/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc @@ -0,0 +1,24 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +#include + +#include "include/org_rocksdb_RemoveEmptyValueCompactionFilter.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h" + +/* + * Class: org_rocksdb_RemoveEmptyValueCompactionFilter + * Method: createNewRemoveEmptyValueCompactionFilter0 + * Signature: ()J + */ +jlong Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0( + JNIEnv* /*env*/, jclass /*jcls*/) { + auto* compaction_filter = + new ROCKSDB_NAMESPACE::RemoveEmptyValueCompactionFilter(); + + // set the native handle to our native compaction filter + return GET_CPLUSPLUS_POINTER(compaction_filter); +} diff --git a/src/rocksdb/java/rocksjni/restorejni.cc b/src/rocksdb/java/rocksjni/restorejni.cc new file mode 100644 index 000000000..aadc86128 --- /dev/null +++ b/src/rocksdb/java/rocksjni/restorejni.cc @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling C++ ROCKSDB_NAMESPACE::RestoreOptions methods +// from Java side. + +#include +#include +#include + +#include + +#include "include/org_rocksdb_RestoreOptions.h" +#include "rocksdb/utilities/backup_engine.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" +/* + * Class: org_rocksdb_RestoreOptions + * Method: newRestoreOptions + * Signature: (Z)J + */ +jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions( + JNIEnv* /*env*/, jclass /*jcls*/, jboolean keep_log_files) { + auto* ropt = new ROCKSDB_NAMESPACE::RestoreOptions(keep_log_files); + return GET_CPLUSPLUS_POINTER(ropt); +} + +/* + * Class: org_rocksdb_RestoreOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* ropt = reinterpret_cast(jhandle); + assert(ropt); + delete ropt; +} diff --git a/src/rocksdb/java/rocksjni/rocks_callback_object.cc b/src/rocksdb/java/rocksjni/rocks_callback_object.cc new file mode 100644 index 000000000..35513e151 --- /dev/null +++ b/src/rocksdb/java/rocksjni/rocks_callback_object.cc @@ -0,0 +1,30 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// JNI Callbacks from C++ to sub-classes or org.rocksdb.RocksCallbackObject + +#include + +#include "include/org_rocksdb_RocksCallbackObject.h" +#include "jnicallback.h" + +/* + * Class: org_rocksdb_RocksCallbackObject + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksCallbackObject_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + // TODO(AR) is deleting from the super class JniCallback OK, or must we delete + // the subclass? Example hierarchies: + // 1) Comparator -> BaseComparatorJniCallback + JniCallback -> + // DirectComparatorJniCallback 2) Comparator -> BaseComparatorJniCallback + + // JniCallback -> ComparatorJniCallback + // I think this is okay, as Comparator and JniCallback both have virtual + // destructors... 
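+  // Deleting through the JniCallback base pointer is well-defined only
+  // because JniCallback declares a virtual destructor; without one,
+  // destroying a subclass such as ComparatorJniCallback through the base
+  // type would be undefined behaviour. A minimal sketch of the requirement:
+  //
+  //   struct Base { virtual ~Base() {} };
+  //   struct Derived : Base { ~Derived() override { /* release refs */ } };
+  //   Base* b = new Derived();
+  //   delete b;  // runs ~Derived() then ~Base() because ~Base is virtual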
+  delete reinterpret_cast<ROCKSDB_NAMESPACE::JniCallback*>(handle);
+}
diff --git a/src/rocksdb/java/rocksjni/rocksdb_exception_test.cc b/src/rocksdb/java/rocksjni/rocksdb_exception_test.cc
new file mode 100644
index 000000000..67e62f726
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/rocksdb_exception_test.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+#include <jni.h>
+
+#include "include/org_rocksdb_RocksDBExceptionTest.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class:     org_rocksdb_RocksDBExceptionTest
+ * Method:    raiseException
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseException(JNIEnv* env,
+                                                          jobject /*jobj*/) {
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env,
+                                                   std::string("test message"));
+}
+
+/*
+ * Class:     org_rocksdb_RocksDBExceptionTest
+ * Method:    raiseExceptionWithStatusCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCode(
+    JNIEnv* env, jobject /*jobj*/) {
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+      env, "test message", ROCKSDB_NAMESPACE::Status::NotSupported());
+}
+
+/*
+ * Class:     org_rocksdb_RocksDBExceptionTest
+ * Method:    raiseExceptionNoMsgWithStatusCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionNoMsgWithStatusCode(
+    JNIEnv* env, jobject /*jobj*/) {
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+      env, ROCKSDB_NAMESPACE::Status::NotSupported());
+}
+
+/*
+ * Class:     org_rocksdb_RocksDBExceptionTest
+ * Method:    raiseExceptionWithStatusCodeSubCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCodeSubCode(
+    JNIEnv* env, jobject /*jobj*/) {
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+      env, "test message",
+      ROCKSDB_NAMESPACE::Status::TimedOut(
+          ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout));
+}
+
+/*
+ * Class:     org_rocksdb_RocksDBExceptionTest
+ * Method:    raiseExceptionNoMsgWithStatusCodeSubCode
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionNoMsgWithStatusCodeSubCode(
+    JNIEnv* env, jobject /*jobj*/) {
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+      env, ROCKSDB_NAMESPACE::Status::TimedOut(
+               ROCKSDB_NAMESPACE::Status::SubCode::kLockTimeout));
+}
+
+/*
+ * Class:     org_rocksdb_RocksDBExceptionTest
+ * Method:    raiseExceptionWithStatusCodeState
+ * Signature: ()V
+ */
+void Java_org_rocksdb_RocksDBExceptionTest_raiseExceptionWithStatusCodeState(
+    JNIEnv* env, jobject /*jobj*/) {
+  ROCKSDB_NAMESPACE::Slice state("test state");
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+      env, "test message", ROCKSDB_NAMESPACE::Status::NotSupported(state));
+}
diff --git a/src/rocksdb/java/rocksjni/rocksjni.cc b/src/rocksdb/java/rocksjni/rocksjni.cc
new file mode 100644
index 000000000..23fa60467
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/rocksjni.cc
@@ -0,0 +1,3947 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::DB methods from Java side.
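+//
+// Most of the open entry points below funnel through an overloaded
+// rocksdb_open_helper(), which takes a std::function so that the same
+// string/handle plumbing can dispatch to DB::Open, DB::OpenForReadOnly or
+// DB::OpenAsSecondary. A minimal sketch of the dispatch (simplified
+// signature, for illustration only):
+//
+//   static jlong open_helper(
+//       JNIEnv* env, jlong jopt_handle, jstring jdb_path,
+//       std::function<ROCKSDB_NAMESPACE::Status(
+//           const ROCKSDB_NAMESPACE::Options&, const std::string&,
+//           ROCKSDB_NAMESPACE::DB**)>
+//           open_fn) {
+//     // convert jdb_path, run open_fn(opts, path, &db), throw on !ok()
+//     return 0;
+//   }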
+ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "include/org_rocksdb_RocksDB.h" +#include "rocksdb/cache.h" +#include "rocksdb/convenience.h" +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/types.h" +#include "rocksdb/version.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +#ifdef min +#undef min +#endif + +jlong rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, jstring jdb_path, + std::function + open_fn) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + auto* opt = reinterpret_cast(jopt_handle); + ROCKSDB_NAMESPACE::DB* db = nullptr; + ROCKSDB_NAMESPACE::Status s = open_fn(*opt, db_path, &db); + + env->ReleaseStringUTFChars(jdb_path, db_path); + + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(db); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: open + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2(JNIEnv* env, jclass, + jlong jopt_handle, + jstring jdb_path) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, + (ROCKSDB_NAMESPACE::Status(*)( + const ROCKSDB_NAMESPACE::Options&, + const std::string&, ROCKSDB_NAMESPACE::DB**)) & + ROCKSDB_NAMESPACE::DB::Open); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: openROnly + * Signature: (JLjava/lang/String;Z)J + */ +jlong Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2Z( + JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path, + jboolean jerror_if_wal_file_exists) { + const bool error_if_wal_file_exists = jerror_if_wal_file_exists == JNI_TRUE; + return rocksdb_open_helper( + env, jopt_handle, jdb_path, + [error_if_wal_file_exists](const ROCKSDB_NAMESPACE::Options& options, + const std::string& db_path, + ROCKSDB_NAMESPACE::DB** db) { + return ROCKSDB_NAMESPACE::DB::OpenForReadOnly(options, db_path, db, + error_if_wal_file_exists); + }); +} + +jlongArray rocksdb_open_helper( + JNIEnv* env, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options, + std::function&, + std::vector*, + ROCKSDB_NAMESPACE::DB**)> + open_fn) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + const jsize len_cols = env->GetArrayLength(jcolumn_names); + jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr); + if (jco == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + std::vector column_families; + jboolean has_exception = JNI_FALSE; + ROCKSDB_NAMESPACE::JniUtil::byteStrings( + env, jcolumn_names, + [](const char* str_data, const size_t str_len) { + return std::string(str_data, str_len); + }, + [&jco, &column_families](size_t idx, std::string cf_name) { + ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[idx]); + column_families.push_back( + ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options)); + }, + &has_exception); + + env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT); + + if (has_exception == JNI_TRUE) { + // exception occurred + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + auto* opt = reinterpret_cast(jopt_handle); + std::vector cf_handles; + ROCKSDB_NAMESPACE::DB* db 
= nullptr; + ROCKSDB_NAMESPACE::Status s = + open_fn(*opt, db_path, column_families, &cf_handles, &db); + + // we have now finished with db_path + env->ReleaseStringUTFChars(jdb_path, db_path); + + // check if open operation was successful + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + const jsize resultsLen = 1 + len_cols; // db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = GET_CPLUSPLUS_POINTER(db); + for (int i = 1; i <= len_cols; i++) { + results[i] = GET_CPLUSPLUS_POINTER(cf_handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresults); + return nullptr; + } + + return jresults; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: openROnly + * Signature: (JLjava/lang/String;[[B[JZ)[J + */ +jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3JZ( + JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options, + jboolean jerror_if_wal_file_exists) { + const bool error_if_wal_file_exists = jerror_if_wal_file_exists == JNI_TRUE; + return rocksdb_open_helper( + env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options, + [error_if_wal_file_exists]( + const ROCKSDB_NAMESPACE::DBOptions& options, + const std::string& db_path, + const std::vector& + column_families, + std::vector* handles, + ROCKSDB_NAMESPACE::DB** db) { + return ROCKSDB_NAMESPACE::DB::OpenForReadOnly( + options, db_path, column_families, handles, db, + error_if_wal_file_exists); + }); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: open + * Signature: (JLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options) { + return rocksdb_open_helper( + env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options, + (ROCKSDB_NAMESPACE::Status(*)( + const ROCKSDB_NAMESPACE::DBOptions&, const std::string&, + const std::vector&, + std::vector*, + ROCKSDB_NAMESPACE::DB**)) & + ROCKSDB_NAMESPACE::DB::Open); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: openAsSecondary + * Signature: (JLjava/lang/String;Ljava/lang/String;)J + */ +jlong Java_org_rocksdb_RocksDB_openAsSecondary__JLjava_lang_String_2Ljava_lang_String_2( + JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path, + jstring jsecondary_db_path) { + const char* secondary_db_path = + env->GetStringUTFChars(jsecondary_db_path, nullptr); + if (secondary_db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + jlong db_handle = rocksdb_open_helper( + env, jopt_handle, jdb_path, + [secondary_db_path](const ROCKSDB_NAMESPACE::Options& options, + const std::string& db_path, + ROCKSDB_NAMESPACE::DB** db) { + return ROCKSDB_NAMESPACE::DB::OpenAsSecondary(options, db_path, + secondary_db_path, db); + }); + + // we have now finished with secondary_db_path + env->ReleaseStringUTFChars(jsecondary_db_path, secondary_db_path); + + return db_handle; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: openAsSecondary + * Signature: (JLjava/lang/String;Ljava/lang/String;[[B[J)[J + */ +jlongArray 
+Java_org_rocksdb_RocksDB_openAsSecondary__JLjava_lang_String_2Ljava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass, jlong jopt_handle, jstring jdb_path, + jstring jsecondary_db_path, jobjectArray jcolumn_names, + jlongArray jcolumn_options) { + const char* secondary_db_path = + env->GetStringUTFChars(jsecondary_db_path, nullptr); + if (secondary_db_path == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jlongArray jhandles = rocksdb_open_helper( + env, jopt_handle, jdb_path, jcolumn_names, jcolumn_options, + [secondary_db_path]( + const ROCKSDB_NAMESPACE::DBOptions& options, + const std::string& db_path, + const std::vector& + column_families, + std::vector* handles, + ROCKSDB_NAMESPACE::DB** db) { + return ROCKSDB_NAMESPACE::DB::OpenAsSecondary( + options, db_path, secondary_db_path, column_families, handles, db); + }); + + // we have now finished with secondary_db_path + env->ReleaseStringUTFChars(jsecondary_db_path, secondary_db_path); + + return jhandles; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_disposeInternal(JNIEnv*, jobject, jlong jhandle) { + auto* db = reinterpret_cast(jhandle); + assert(db != nullptr); + delete db; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: closeDatabase + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_closeDatabase(JNIEnv* env, jclass, + jlong jhandle) { + auto* db = reinterpret_cast(jhandle); + assert(db != nullptr); + ROCKSDB_NAMESPACE::Status s = db->Close(); + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: listColumnFamilies + * Signature: (JLjava/lang/String;)[[B + */ +jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies(JNIEnv* env, jclass, + jlong jopt_handle, + jstring jdb_path) { + std::vector column_family_names; + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + auto* opt = reinterpret_cast(jopt_handle); + ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DB::ListColumnFamilies( + *opt, db_path, &column_family_names); + + env->ReleaseStringUTFChars(jdb_path, db_path); + + jobjectArray jcolumn_family_names = + ROCKSDB_NAMESPACE::JniUtil::stringsBytes(env, column_family_names); + + return jcolumn_family_names; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: createColumnFamily + * Signature: (J[BIJ)J + */ +jlong Java_org_rocksdb_RocksDB_createColumnFamily(JNIEnv* env, jobject, + jlong jhandle, + jbyteArray jcf_name, + jint jcf_name_len, + jlong jcf_options_handle) { + auto* db = reinterpret_cast(jhandle); + jboolean has_exception = JNI_FALSE; + const std::string cf_name = + ROCKSDB_NAMESPACE::JniUtil::byteString( + env, jcf_name, jcf_name_len, + [](const char* str, const size_t len) { + return std::string(str, len); + }, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return 0; + } + auto* cf_options = reinterpret_cast( + jcf_options_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + ROCKSDB_NAMESPACE::Status s = + db->CreateColumnFamily(*cf_options, cf_name, &cf_handle); + if (!s.ok()) { + // error occurred + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } + return GET_CPLUSPLUS_POINTER(cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: createColumnFamilies + * Signature: (JJ[[B)[J + */ +jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__JJ_3_3B( + JNIEnv* env, 
jobject, jlong jhandle, jlong jcf_options_handle, + jobjectArray jcf_names) { + auto* db = reinterpret_cast(jhandle); + auto* cf_options = reinterpret_cast( + jcf_options_handle); + jboolean has_exception = JNI_FALSE; + std::vector cf_names; + ROCKSDB_NAMESPACE::JniUtil::byteStrings( + env, jcf_names, + [](const char* str, const size_t len) { return std::string(str, len); }, + [&cf_names](const size_t, std::string str) { cf_names.push_back(str); }, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return nullptr; + } + + std::vector cf_handles; + ROCKSDB_NAMESPACE::Status s = + db->CreateColumnFamilies(*cf_options, cf_names, &cf_handles); + if (!s.ok()) { + // error occurred + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + jlongArray jcf_handles = ROCKSDB_NAMESPACE::JniUtil::toJPointers< + ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, cf_handles, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return nullptr; + } + return jcf_handles; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: createColumnFamilies + * Signature: (J[J[[B)[J + */ +jlongArray Java_org_rocksdb_RocksDB_createColumnFamilies__J_3J_3_3B( + JNIEnv* env, jobject, jlong jhandle, jlongArray jcf_options_handles, + jobjectArray jcf_names) { + auto* db = reinterpret_cast(jhandle); + const jsize jlen = env->GetArrayLength(jcf_options_handles); + std::vector cf_descriptors; + cf_descriptors.reserve(jlen); + + jlong* jcf_options_handles_elems = + env->GetLongArrayElements(jcf_options_handles, nullptr); + if (jcf_options_handles_elems == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + // extract the column family descriptors + jboolean has_exception = JNI_FALSE; + for (jsize i = 0; i < jlen; i++) { + auto* cf_options = + reinterpret_cast( + jcf_options_handles_elems[i]); + jbyteArray jcf_name = + static_cast(env->GetObjectArrayElement(jcf_names, i)); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements(jcf_options_handles, + jcf_options_handles_elems, JNI_ABORT); + return nullptr; + } + const std::string cf_name = + ROCKSDB_NAMESPACE::JniUtil::byteString( + env, jcf_name, + [](const char* str, const size_t len) { + return std::string(str, len); + }, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jcf_name); + env->ReleaseLongArrayElements(jcf_options_handles, + jcf_options_handles_elems, JNI_ABORT); + return nullptr; + } + + cf_descriptors.push_back( + ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->DeleteLocalRef(jcf_name); + } + + std::vector cf_handles; + ROCKSDB_NAMESPACE::Status s = + db->CreateColumnFamilies(cf_descriptors, &cf_handles); + + env->ReleaseLongArrayElements(jcf_options_handles, jcf_options_handles_elems, + JNI_ABORT); + + if (!s.ok()) { + // error occurred + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + jlongArray jcf_handles = ROCKSDB_NAMESPACE::JniUtil::toJPointers< + ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, cf_handles, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return nullptr; + } + return jcf_handles; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: dropColumnFamily + * Signature: (JJ)V; + */ +void Java_org_rocksdb_RocksDB_dropColumnFamily(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jcf_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto* cf_handle 
= + reinterpret_cast(jcf_handle); + ROCKSDB_NAMESPACE::Status s = db_handle->DropColumnFamily(cf_handle); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: dropColumnFamilies + * Signature: (J[J)V + */ +void Java_org_rocksdb_RocksDB_dropColumnFamilies( + JNIEnv* env, jobject, jlong jdb_handle, jlongArray jcolumn_family_handles) { + auto* db_handle = reinterpret_cast(jdb_handle); + + std::vector cf_handles; + if (jcolumn_family_handles != nullptr) { + const jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr); + if (jcfh == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + for (jsize i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); + } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); + } + + ROCKSDB_NAMESPACE::Status s = db_handle->DropColumnFamilies(cf_handles); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::Put + +/** + * @return true if the put succeeded, false if a Java Exception was thrown + */ +bool rocksdb_put_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::WriteOptions& write_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle, + jbyteArray jkey, jint jkey_off, jint jkey_len, + jbyteArray jval, jint jval_off, jint jval_len) { + jbyte* key = new jbyte[jkey_len]; + env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] key; + return false; + } + + jbyte* value = new jbyte[jval_len]; + env->GetByteArrayRegion(jval, jval_off, jval_len, value); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] value; + delete[] key; + return false; + } + + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast(value), + jval_len); + + ROCKSDB_NAMESPACE::Status s; + if (cf_handle != nullptr) { + s = db->Put(write_options, cf_handle, key_slice, value_slice); + } else { + // backwards compatibility + s = db->Put(write_options, key_slice, value_slice); + } + + // cleanup + delete[] value; + delete[] key; + + if (s.ok()) { + return true; + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return false; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (J[BII[BII)V + */ +void Java_org_rocksdb_RocksDB_put__J_3BII_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + rocksdb_put_helper(env, db, default_write_options, nullptr, jkey, jkey_off, + jkey_len, jval, jval_off, jval_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (J[BII[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_put__J_3BII_3BIIJ(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions 
default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_put_helper(env, db, default_write_options, cf_handle, jkey, + jkey_off, jkey_len, jval, jval_off, jval_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (JJ[BII[BII)V + */ +void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + rocksdb_put_helper(env, db, *write_options, nullptr, jkey, jkey_off, jkey_len, + jval, jval_off, jval_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (JJ[BII[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_put__JJ_3BII_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_put_helper(env, db, *write_options, cf_handle, jkey, jkey_off, + jkey_len, jval, jval_off, jval_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: putDirect + * Signature: (JJLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)V + */ +void Java_org_rocksdb_RocksDB_putDirect( + JNIEnv* env, jobject /*jdb*/, jlong jdb_handle, jlong jwrite_options_handle, + jobject jkey, jint jkey_off, jint jkey_len, jobject jval, jint jval_off, + jint jval_len, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + auto put = [&env, &db, &cf_handle, &write_options]( + ROCKSDB_NAMESPACE::Slice& key, + ROCKSDB_NAMESPACE::Slice& value) { + ROCKSDB_NAMESPACE::Status s; + if (cf_handle == nullptr) { + s = db->Put(*write_options, key, value); + } else { + s = db->Put(*write_options, cf_handle, key, value); + } + if (s.ok()) { + return; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + }; + ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(put, env, jkey, jkey_off, jkey_len, + jval, jval_off, jval_len); +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::Delete() + +/** + * @return true if the delete succeeded, false if a Java Exception was thrown + */ +bool rocksdb_delete_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::WriteOptions& write_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle, + jbyteArray jkey, jint jkey_off, jint jkey_len) { + jbyte* key = new jbyte[jkey_len]; + env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] key; + return false; + } + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + + ROCKSDB_NAMESPACE::Status s; + if (cf_handle != nullptr) { 
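+    // cf_handle is only non-null when the Java caller used an overload that
+    // passes a ColumnFamilyHandle; the overloads without one pass a 0
+    // handle, which arrives here as nullptr and falls through to the
+    // two-argument DB::Delete in the else-branch, targeting the default
+    // column family.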
+ s = db->Delete(write_options, cf_handle, key_slice); + } else { + // backwards compatibility + s = db->Delete(write_options, key_slice); + } + + // cleanup + delete[] key; + + if (s.ok()) { + return true; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return false; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: delete + * Signature: (J[BII)V + */ +void Java_org_rocksdb_RocksDB_delete__J_3BII(JNIEnv* env, jobject, + jlong jdb_handle, jbyteArray jkey, + jint jkey_off, jint jkey_len) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + rocksdb_delete_helper(env, db, default_write_options, nullptr, jkey, jkey_off, + jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: delete + * Signature: (J[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_delete__J_3BIIJ(JNIEnv* env, jobject, + jlong jdb_handle, jbyteArray jkey, + jint jkey_off, jint jkey_len, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_delete_helper(env, db, default_write_options, cf_handle, jkey, + jkey_off, jkey_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: delete + * Signature: (JJ[BII)V + */ +void Java_org_rocksdb_RocksDB_delete__JJ_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jwrite_options, + jbyteArray jkey, jint jkey_off, + jint jkey_len) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options); + rocksdb_delete_helper(env, db, *write_options, nullptr, jkey, jkey_off, + jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: delete + * Signature: (JJ[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_delete__JJ_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options, + jbyteArray jkey, jint jkey_off, jint jkey_len, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_delete_helper(env, db, *write_options, cf_handle, jkey, jkey_off, + jkey_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::SingleDelete() +/** + * @return true if the single delete succeeded, false if a Java Exception + * was thrown + */ +bool rocksdb_single_delete_helper( + JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::WriteOptions& write_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle, jbyteArray jkey, + jint jkey_len) { + jbyte* key = new jbyte[jkey_len]; + env->GetByteArrayRegion(jkey, 0, jkey_len, key); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] key; + return false; + } + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + + ROCKSDB_NAMESPACE::Status s; + if (cf_handle != nullptr) { + s = db->SingleDelete(write_options, cf_handle, key_slice); + } else { + // backwards compatibility + s = 
db->SingleDelete(write_options, key_slice); + } + + delete[] key; + + if (s.ok()) { + return true; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return false; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: singleDelete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_RocksDB_singleDelete__J_3BI(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, + jint jkey_len) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + rocksdb_single_delete_helper(env, db, default_write_options, nullptr, jkey, + jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: singleDelete + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_RocksDB_singleDelete__J_3BIJ(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, + jint jkey_len, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_single_delete_helper(env, db, default_write_options, cf_handle, + jkey, jkey_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: singleDelete + * Signature: (JJ[BIJ)V + */ +void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BI(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jwrite_options, + jbyteArray jkey, + jint jkey_len) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options); + rocksdb_single_delete_helper(env, db, *write_options, nullptr, jkey, + jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: singleDelete + * Signature: (JJ[BIJ)V + */ +void Java_org_rocksdb_RocksDB_singleDelete__JJ_3BIJ( + JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options, + jbyteArray jkey, jint jkey_len, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_single_delete_helper(env, db, *write_options, cf_handle, jkey, + jkey_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::DeleteRange() +/** + * @return true if the delete range succeeded, false if a Java Exception + * was thrown + */ +bool rocksdb_delete_range_helper( + JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::WriteOptions& write_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle, jbyteArray jbegin_key, + jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key, + jint jend_key_off, jint jend_key_len) { + jbyte* begin_key = new jbyte[jbegin_key_len]; + env->GetByteArrayRegion(jbegin_key, jbegin_key_off, jbegin_key_len, + begin_key); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] begin_key; + return false; + } + ROCKSDB_NAMESPACE::Slice begin_key_slice(reinterpret_cast(begin_key), + jbegin_key_len); + + jbyte* end_key = new jbyte[jend_key_len]; + env->GetByteArrayRegion(jend_key, jend_key_off, jend_key_len, end_key); + if (env->ExceptionCheck()) { + // 
exception thrown: ArrayIndexOutOfBoundsException + delete[] begin_key; + delete[] end_key; + return false; + } + ROCKSDB_NAMESPACE::Slice end_key_slice(reinterpret_cast(end_key), + jend_key_len); + + ROCKSDB_NAMESPACE::Status s = + db->DeleteRange(write_options, cf_handle, begin_key_slice, end_key_slice); + + // cleanup + delete[] begin_key; + delete[] end_key; + + if (s.ok()) { + return true; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return false; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: deleteRange + * Signature: (J[BII[BII)V + */ +void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BII( + JNIEnv* env, jobject, jlong jdb_handle, jbyteArray jbegin_key, + jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key, + jint jend_key_off, jint jend_key_len) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + rocksdb_delete_range_helper(env, db, default_write_options, nullptr, + jbegin_key, jbegin_key_off, jbegin_key_len, + jend_key, jend_key_off, jend_key_len); +} + +jint rocksdb_get_helper_direct( + JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::ReadOptions& read_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* column_family_handle, jobject jkey, + jint jkey_off, jint jkey_len, jobject jval, jint jval_off, jint jval_len, + bool* has_exception) { + static const int kNotFound = -1; + static const int kStatusError = -2; + static const int kArgumentError = -3; + + char* key = reinterpret_cast(env->GetDirectBufferAddress(jkey)); + if (key == nullptr) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid key argument (argument is not a valid direct ByteBuffer)"); + *has_exception = true; + return kArgumentError; + } + if (env->GetDirectBufferCapacity(jkey) < (jkey_off + jkey_len)) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid key argument. Capacity is less than requested region (offset " + "+ length)."); + *has_exception = true; + return kArgumentError; + } + + char* value = reinterpret_cast(env->GetDirectBufferAddress(jval)); + if (value == nullptr) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid value argument (argument is not a valid direct ByteBuffer)"); + *has_exception = true; + return kArgumentError; + } + + if (env->GetDirectBufferCapacity(jval) < (jval_off + jval_len)) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid value argument. Capacity is less than requested region " + "(offset + length)."); + *has_exception = true; + return kArgumentError; + } + + key += jkey_off; + value += jval_off; + + ROCKSDB_NAMESPACE::Slice key_slice(key, jkey_len); + + // TODO(yhchiang): we might save one memory allocation here by adding + // a DB::Get() function which takes preallocated jbyte* as input. + std::string cvalue; + ROCKSDB_NAMESPACE::Status s; + if (column_family_handle != nullptr) { + s = db->Get(read_options, column_family_handle, key_slice, &cvalue); + } else { + // backwards compatibility + s = db->Get(read_options, key_slice, &cvalue); + } + + if (s.IsNotFound()) { + *has_exception = false; + return kNotFound; + } else if (!s.ok()) { + *has_exception = true; + // Here since we are throwing a Java exception from c++ side. + // As a result, c++ does not know calling this function will in fact + // throwing an exception. As a result, the execution flow will + // not stop here, and codes after this throw will still be + // executed. 
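+    // (Editorial sketch, not upstream code: a caller that continues JNI
+    // work after this helper returns must consult *has_exception first, e.g.
+    //   bool has_exception = false;
+    //   jint len = rocksdb_get_helper_direct(..., &has_exception);
+    //   if (has_exception) { return; /* Java exception is pending */ }
+    // because the pending Java exception does not unwind the C++ stack.)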
+ ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + + // Return a dummy const value to avoid compilation error, although + // java side might not have a chance to get the return value :) + return kStatusError; + } + + const jint cvalue_len = static_cast(cvalue.size()); + const jint length = std::min(jval_len, cvalue_len); + + memcpy(value, cvalue.c_str(), length); + + *has_exception = false; + return cvalue_len; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: deleteRange + * Signature: (J[BII[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_deleteRange__J_3BII_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jbyteArray jbegin_key, + jint jbegin_key_off, jint jbegin_key_len, jbyteArray jend_key, + jint jend_key_off, jint jend_key_len, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_delete_range_helper(env, db, default_write_options, cf_handle, + jbegin_key, jbegin_key_off, jbegin_key_len, + jend_key, jend_key_off, jend_key_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: deleteRange + * Signature: (JJ[BII[BII)V + */ +void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BII( + JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options, + jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len, + jbyteArray jend_key, jint jend_key_off, jint jend_key_len) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options); + rocksdb_delete_range_helper(env, db, *write_options, nullptr, jbegin_key, + jbegin_key_off, jbegin_key_len, jend_key, + jend_key_off, jend_key_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: deleteRange + * Signature: (JJ[BII[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_deleteRange__JJ_3BII_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options, + jbyteArray jbegin_key, jint jbegin_key_off, jint jbegin_key_len, + jbyteArray jend_key, jint jend_key_off, jint jend_key_len, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_delete_range_helper(env, db, *write_options, cf_handle, jbegin_key, + jbegin_key_off, jbegin_key_len, jend_key, + jend_key_off, jend_key_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getDirect + * Signature: (JJLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)I + */ +jint Java_org_rocksdb_RocksDB_getDirect(JNIEnv* env, jobject /*jdb*/, + jlong jdb_handle, jlong jropt_handle, + jobject jkey, jint jkey_off, + jint jkey_len, jobject jval, + jint jval_off, jint jval_len, + jlong jcf_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto* ro_opt = + reinterpret_cast(jropt_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + bool has_exception = false; + return rocksdb_get_helper_direct( + env, db_handle, + ro_opt == nullptr ? 
ROCKSDB_NAMESPACE::ReadOptions() : *ro_opt, cf_handle, + jkey, jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception); +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::Merge + +/** + * @return true if the merge succeeded, false if a Java Exception was thrown + */ +bool rocksdb_merge_helper(JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::WriteOptions& write_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle, + jbyteArray jkey, jint jkey_off, jint jkey_len, + jbyteArray jval, jint jval_off, jint jval_len) { + jbyte* key = new jbyte[jkey_len]; + env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] key; + return false; + } + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + + jbyte* value = new jbyte[jval_len]; + env->GetByteArrayRegion(jval, jval_off, jval_len, value); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] value; + delete[] key; + return false; + } + ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast(value), + jval_len); + + ROCKSDB_NAMESPACE::Status s; + if (cf_handle != nullptr) { + s = db->Merge(write_options, cf_handle, key_slice, value_slice); + } else { + s = db->Merge(write_options, key_slice, value_slice); + } + + // cleanup + delete[] value; + delete[] key; + + if (s.ok()) { + return true; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return false; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: (J[BII[BII)V + */ +void Java_org_rocksdb_RocksDB_merge__J_3BII_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + rocksdb_merge_helper(env, db, default_write_options, nullptr, jkey, jkey_off, + jkey_len, jval, jval_off, jval_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: (J[BII[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_merge__J_3BII_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, jint jval_off, jint jval_len, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + static const ROCKSDB_NAMESPACE::WriteOptions default_write_options = + ROCKSDB_NAMESPACE::WriteOptions(); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_merge_helper(env, db, default_write_options, cf_handle, jkey, + jkey_off, jkey_len, jval, jval_off, jval_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: (JJ[BII[BII)V + */ +void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BII( + JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + rocksdb_merge_helper(env, db, *write_options, nullptr, jkey, jkey_off, + jkey_len, jval, jval_off, jval_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: 
(JJ[BII[BIIJ)V + */ +void Java_org_rocksdb_RocksDB_merge__JJ_3BII_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len, jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_merge_helper(env, db, *write_options, cf_handle, jkey, jkey_off, + jkey_len, jval, jval_off, jval_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + } +} + +jlong rocksdb_iterator_helper( + ROCKSDB_NAMESPACE::DB* db, ROCKSDB_NAMESPACE::ReadOptions read_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle) { + ROCKSDB_NAMESPACE::Iterator* iterator = nullptr; + if (cf_handle != nullptr) { + iterator = db->NewIterator(read_options, cf_handle); + } else { + iterator = db->NewIterator(read_options); + } + return GET_CPLUSPLUS_POINTER(iterator); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: deleteDirect + * Signature: (JJLjava/nio/ByteBuffer;IIJ)V + */ +void Java_org_rocksdb_RocksDB_deleteDirect(JNIEnv* env, jobject /*jdb*/, + jlong jdb_handle, + jlong jwrite_options, jobject jkey, + jint jkey_offset, jint jkey_len, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options); + auto* cf_handle = + reinterpret_cast(jcf_handle); + auto remove = [&env, &db, &write_options, + &cf_handle](ROCKSDB_NAMESPACE::Slice& key) { + ROCKSDB_NAMESPACE::Status s; + if (cf_handle == nullptr) { + s = db->Delete(*write_options, key); + } else { + s = db->Delete(*write_options, cf_handle, key); + } + if (s.ok()) { + return; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(remove, env, jkey, jkey_offset, + jkey_len); +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::Write +/* + * Class: org_rocksdb_RocksDB + * Method: write0 + * Signature: (JJJ)V + */ +void Java_org_rocksdb_RocksDB_write0(JNIEnv* env, jobject, jlong jdb_handle, + jlong jwrite_options_handle, + jlong jwb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* wb = reinterpret_cast(jwb_handle); + + ROCKSDB_NAMESPACE::Status s = db->Write(*write_options, wb); + + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: write1 + * Signature: (JJJ)V + */ +void Java_org_rocksdb_RocksDB_write1(JNIEnv* env, jobject, jlong jdb_handle, + jlong jwrite_options_handle, + jlong jwbwi_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* wbwi = + reinterpret_cast(jwbwi_handle); + auto* wb = wbwi->GetWriteBatch(); + + ROCKSDB_NAMESPACE::Status s = db->Write(*write_options, wb); + + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::Get + +jbyteArray rocksdb_get_helper( + JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::ReadOptions& read_opt, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* column_family_handle, + jbyteArray jkey, jint jkey_off, 
jint jkey_len) { + jbyte* key = new jbyte[jkey_len]; + env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] key; + return nullptr; + } + + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + + std::string value; + ROCKSDB_NAMESPACE::Status s; + if (column_family_handle != nullptr) { + s = db->Get(read_opt, column_family_handle, key_slice, &value); + } else { + // backwards compatibility + s = db->Get(read_opt, key_slice, &value); + } + + // cleanup + delete[] key; + + if (s.IsNotFound()) { + return nullptr; + } + + if (s.ok()) { + jbyteArray jret_value = ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, value); + if (jret_value == nullptr) { + // exception occurred + return nullptr; + } + return jret_value; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BII)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__J_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len) { + return rocksdb_get_helper( + env, reinterpret_cast(jdb_handle), + ROCKSDB_NAMESPACE::ReadOptions(), nullptr, jkey, jkey_off, jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BIIJ)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIIJ(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, + jlong jcf_handle) { + auto db_handle = reinterpret_cast(jdb_handle); + auto cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + return rocksdb_get_helper(env, db_handle, ROCKSDB_NAMESPACE::ReadOptions(), + cf_handle, jkey, jkey_off, jkey_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + return nullptr; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (JJ[BII)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jropt_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len) { + return rocksdb_get_helper( + env, reinterpret_cast(jdb_handle), + *reinterpret_cast(jropt_handle), nullptr, + jkey, jkey_off, jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (JJ[BIIJ)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle, jbyteArray jkey, + jint jkey_off, jint jkey_len, jlong jcf_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto& ro_opt = + *reinterpret_cast(jropt_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, jkey_off, + jkey_len); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + return nullptr; + } +} + +jint rocksdb_get_helper( + JNIEnv* env, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::ReadOptions& read_options, + ROCKSDB_NAMESPACE::ColumnFamilyHandle* column_family_handle, + jbyteArray jkey, jint jkey_off, jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len, bool* has_exception) { + static const int kNotFound = -1; + static const int kStatusError = -2; + + jbyte* key = new jbyte[jkey_len]; + env->GetByteArrayRegion(jkey, jkey_off, jkey_len, key); + if 
(env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + delete[] key; + *has_exception = true; + return kStatusError; + } + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + + // TODO(yhchiang): we might save one memory allocation here by adding + // a DB::Get() function which takes preallocated jbyte* as input. + std::string cvalue; + ROCKSDB_NAMESPACE::Status s; + if (column_family_handle != nullptr) { + s = db->Get(read_options, column_family_handle, key_slice, &cvalue); + } else { + // backwards compatibility + s = db->Get(read_options, key_slice, &cvalue); + } + + // cleanup + delete[] key; + + if (s.IsNotFound()) { + *has_exception = false; + return kNotFound; + } else if (!s.ok()) { + *has_exception = true; + // Here since we are throwing a Java exception from c++ side. + // As a result, c++ does not know calling this function will in fact + // throwing an exception. As a result, the execution flow will + // not stop here, and codes after this throw will still be + // executed. + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + + // Return a dummy const value to avoid compilation error, although + // java side might not have a chance to get the return value :) + return kStatusError; + } + + const jint cvalue_len = static_cast(cvalue.size()); + const jint length = std::min(jval_len, cvalue_len); + + env->SetByteArrayRegion( + jval, jval_off, length, + const_cast(reinterpret_cast(cvalue.c_str()))); + if (env->ExceptionCheck()) { + // exception thrown: OutOfMemoryError + *has_exception = true; + return kStatusError; + } + + *has_exception = false; + return cvalue_len; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BII[BII)I + */ +jint Java_org_rocksdb_RocksDB_get__J_3BII_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len) { + bool has_exception = false; + return rocksdb_get_helper( + env, reinterpret_cast(jdb_handle), + ROCKSDB_NAMESPACE::ReadOptions(), nullptr, jkey, jkey_off, jkey_len, jval, + jval_off, jval_len, &has_exception); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BII[BIIJ)I + */ +jint Java_org_rocksdb_RocksDB_get__J_3BII_3BIIJ(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len, + jlong jcf_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + bool has_exception = false; + return rocksdb_get_helper(env, db_handle, ROCKSDB_NAMESPACE::ReadOptions(), + cf_handle, jkey, jkey_off, jkey_len, jval, + jval_off, jval_len, &has_exception); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + // will never be evaluated + return 0; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (JJ[BII[BII)I + */ +jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BII(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jropt_handle, + jbyteArray jkey, jint jkey_off, + jint jkey_len, jbyteArray jval, + jint jval_off, jint jval_len) { + bool has_exception = false; + return rocksdb_get_helper( + env, reinterpret_cast(jdb_handle), + *reinterpret_cast(jropt_handle), nullptr, + jkey, jkey_off, jkey_len, jval, jval_off, jval_len, &has_exception); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: 
(JJ[BII[BIIJ)I + */ +jint Java_org_rocksdb_RocksDB_get__JJ_3BII_3BIIJ( + JNIEnv* env, jobject, jlong jdb_handle, jlong jropt_handle, jbyteArray jkey, + jint jkey_off, jint jkey_len, jbyteArray jval, jint jval_off, jint jval_len, + jlong jcf_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto& ro_opt = + *reinterpret_cast(jropt_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + bool has_exception = false; + return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, jkey_off, + jkey_len, jval, jval_off, jval_len, + &has_exception); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Invalid ColumnFamilyHandle.")); + // will never be evaluated + return 0; + } +} + +inline void multi_get_helper_release_keys(std::vector& keys_to_free) { + auto end = keys_to_free.end(); + for (auto it = keys_to_free.begin(); it != end; ++it) { + delete[] * it; + } + keys_to_free.clear(); +} + +/** + * @brief fill a native array of cf handles from java handles + * + * @param env + * @param cf_handles to fill from the java variants + * @param jcolumn_family_handles + * @return true if the copy succeeds + * @return false if a JNI exception is generated + */ +inline bool cf_handles_from_jcf_handles( + JNIEnv* env, + std::vector& cf_handles, + jlongArray jcolumn_family_handles) { + if (jcolumn_family_handles != nullptr) { + const jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr); + if (jcfh == nullptr) { + // exception thrown: OutOfMemoryError + jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError"); + (env)->ThrowNew(exception_cls, + "Insufficient Memory for CF handle array."); + return false; + } + + for (jsize i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); + } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); + } + return true; +} + +/** + * @brief copy keys from JNI into vector of slices for Rocks API + * + * @param keys to instantiate + * @param jkeys + * @param jkey_offs + * @param jkey_lens + * @return true if the copy succeeds + * @return false if a JNI exception is raised + */ +inline bool keys_from_jkeys(JNIEnv* env, + std::vector& keys, + std::vector& keys_to_free, + jobjectArray jkeys, jintArray jkey_offs, + jintArray jkey_lens) { + jint* jkey_off = env->GetIntArrayElements(jkey_offs, nullptr); + if (jkey_off == nullptr) { + // exception thrown: OutOfMemoryError + jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError"); + (env)->ThrowNew(exception_cls, "Insufficient Memory for key offset array."); + return false; + } + + jint* jkey_len = env->GetIntArrayElements(jkey_lens, nullptr); + if (jkey_len == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT); + jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError"); + (env)->ThrowNew(exception_cls, "Insufficient Memory for key length array."); + return false; + } + + const jsize len_keys = env->GetArrayLength(jkeys); + for (jsize i = 0; i < len_keys; i++) { + jobject jkey = env->GetObjectArrayElement(jkeys, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT); + env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT); + 
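// Free every jbyte[] buffer copied so far before bailing out;
+      // multi_get_helper_release_keys deletes each element and clears the
+      // vector (editorial comment, not part of the upstream patch).
+      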
multi_get_helper_release_keys(keys_to_free);
+      jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError");
+      (env)->ThrowNew(exception_cls,
+                      "Insufficient Memory for key object array.");
+      return false;
+    }
+
+    jbyteArray jkey_ba = reinterpret_cast<jbyteArray>(jkey);
+
+    const jint len_key = jkey_len[i];
+    jbyte* key = new jbyte[len_key];
+    env->GetByteArrayRegion(jkey_ba, jkey_off[i], len_key, key);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      delete[] key;
+      env->DeleteLocalRef(jkey);
+      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+      multi_get_helper_release_keys(keys_to_free);
+      jclass exception_cls =
+          (env)->FindClass("java/lang/ArrayIndexOutOfBoundsException");
+      (env)->ThrowNew(exception_cls, "Invalid byte array region index.");
+      return false;
+    }
+
+    ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key), len_key);
+    keys.push_back(key_slice);
+
+    env->DeleteLocalRef(jkey);
+    keys_to_free.push_back(key);
+  }
+
+  // cleanup jkey_off and jkey_len
+  env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+  env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+
+  return true;
+}
+
+inline bool keys_from_bytebuffers(JNIEnv* env,
+                                  std::vector<ROCKSDB_NAMESPACE::Slice>& keys,
+                                  jobjectArray jkeys, jintArray jkey_offs,
+                                  jintArray jkey_lens) {
+  jint* jkey_off = env->GetIntArrayElements(jkey_offs, nullptr);
+  if (jkey_off == nullptr) {
+    // exception thrown: OutOfMemoryError
+    jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError");
+    (env)->ThrowNew(exception_cls,
+                    "Insufficient Memory for key offset array.");
+    return false;
+  }
+
+  jint* jkey_len = env->GetIntArrayElements(jkey_lens, nullptr);
+  if (jkey_len == nullptr) {
+    // exception thrown: OutOfMemoryError
+    env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+    jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError");
+    (env)->ThrowNew(exception_cls,
+                    "Insufficient Memory for key length array.");
+    return false;
+  }
+
+  const jsize len_keys = env->GetArrayLength(jkeys);
+  for (jsize i = 0; i < len_keys; i++) {
+    jobject jkey = env->GetObjectArrayElement(jkeys, i);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      // release the borrowed offset/length elements before bailing out
+      env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+      env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+      return false;
+    }
+    char* key = reinterpret_cast<char*>(env->GetDirectBufferAddress(jkey));
+    ROCKSDB_NAMESPACE::Slice key_slice(key + jkey_off[i], jkey_len[i]);
+    keys.push_back(key_slice);
+
+    env->DeleteLocalRef(jkey);
+  }
+
+  // cleanup jkey_off and jkey_len
+  env->ReleaseIntArrayElements(jkey_lens, jkey_len, JNI_ABORT);
+  env->ReleaseIntArrayElements(jkey_offs, jkey_off, JNI_ABORT);
+  return true;
+}
+
+/**
+ * cf multi get
+ *
+ * @return byte[][] of values or nullptr if an
+ *     exception occurs
+ */
+jobjectArray multi_get_helper(JNIEnv* env, jobject, ROCKSDB_NAMESPACE::DB* db,
+                              const ROCKSDB_NAMESPACE::ReadOptions& rOpt,
+                              jobjectArray jkeys, jintArray jkey_offs,
+                              jintArray jkey_lens,
+                              jlongArray jcolumn_family_handles) {
+  std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
+  if (!cf_handles_from_jcf_handles(env, cf_handles, jcolumn_family_handles)) {
+    return nullptr;
+  }
+
+  std::vector<ROCKSDB_NAMESPACE::Slice> keys;
+  std::vector<jbyte*> keys_to_free;
+  if (!keys_from_jkeys(env, keys, keys_to_free, jkeys, jkey_offs, jkey_lens)) {
+    return nullptr;
+  }
+
+  std::vector<std::string> values;
+  std::vector<ROCKSDB_NAMESPACE::Status> s;
+  if (cf_handles.size() == 0) {
+    s = db->MultiGet(rOpt, keys, &values);
+  } else {
+    s = db->MultiGet(rOpt, cf_handles, keys, &values);
+  }
+
+  // free up allocated byte arrays
+  multi_get_helper_release_keys(keys_to_free);
+
+  // prepare the results
+  jobjectArray jresults = ROCKSDB_NAMESPACE::ByteJni::new2dByteArray(
+      env, static_cast<jsize>(s.size()));
+  if (jresults
== nullptr) { + // exception occurred + jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError"); + (env)->ThrowNew(exception_cls, "Insufficient Memory for results."); + return nullptr; + } + + // add to the jresults + for (std::vector::size_type i = 0; i != s.size(); + i++) { + if (s[i].ok()) { + std::string* value = &values[i]; + const jsize jvalue_len = static_cast(value->size()); + jbyteArray jentry_value = env->NewByteArray(jvalue_len); + if (jentry_value == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetByteArrayRegion( + jentry_value, 0, static_cast(jvalue_len), + const_cast(reinterpret_cast(value->c_str()))); + if (env->ExceptionCheck()) { + // exception thrown: + // ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jentry_value); + return nullptr; + } + + env->SetObjectArrayElement(jresults, static_cast(i), jentry_value); + if (env->ExceptionCheck()) { + // exception thrown: + // ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jentry_value); + return nullptr; + } + + env->DeleteLocalRef(jentry_value); + } + } + + return jresults; +} + +/** + * cf multi get + * + * fill supplied native buffers, or raise JNI + * exception on a problem + */ + +/** + * @brief multi_get_helper_direct for fast-path multiget (io_uring) on Linux + * + * @param env + * @param db + * @param rOpt read options + * @param jcolumn_family_handles 0, 1, or n column family handles + * @param jkeys + * @param jkey_offsets + * @param jkey_lengths + * @param jvalues byte buffers to receive values + * @param jvalue_sizes returned actual sizes of data values for keys + * @param jstatuses returned java RocksDB status values for per key + */ +void multi_get_helper_direct(JNIEnv* env, jobject, ROCKSDB_NAMESPACE::DB* db, + const ROCKSDB_NAMESPACE::ReadOptions& rOpt, + jlongArray jcolumn_family_handles, + jobjectArray jkeys, jintArray jkey_offsets, + jintArray jkey_lengths, jobjectArray jvalues, + jintArray jvalue_sizes, jobjectArray jstatuses) { + const jsize num_keys = env->GetArrayLength(jkeys); + + std::vector keys; + if (!keys_from_bytebuffers(env, keys, jkeys, jkey_offsets, jkey_lengths)) { + return; + } + + std::vector values(num_keys); + + std::vector cf_handles; + if (!cf_handles_from_jcf_handles(env, cf_handles, jcolumn_family_handles)) { + return; + } + + std::vector s(num_keys); + if (cf_handles.size() == 0) { + // we can use the more efficient call here + auto cf_handle = db->DefaultColumnFamily(); + db->MultiGet(rOpt, cf_handle, num_keys, keys.data(), values.data(), + s.data()); + } else if (cf_handles.size() == 1) { + // we can use the more efficient call here + auto cf_handle = cf_handles[0]; + db->MultiGet(rOpt, cf_handle, num_keys, keys.data(), values.data(), + s.data()); + } else { + // multiple CFs version + db->MultiGet(rOpt, num_keys, cf_handles.data(), keys.data(), values.data(), + s.data()); + } + + // prepare the results + jobjectArray jresults = ROCKSDB_NAMESPACE::ByteJni::new2dByteArray( + env, static_cast(s.size())); + if (jresults == nullptr) { + // exception occurred + jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError"); + (env)->ThrowNew(exception_cls, "Insufficient Memory for results."); + return; + } + + std::vector value_size; + for (int i = 0; i < num_keys; i++) { + auto jstatus = ROCKSDB_NAMESPACE::StatusJni::construct(env, s[i]); + if (jstatus == nullptr) { + // exception in context + return; + } + env->SetObjectArrayElement(jstatuses, i, jstatus); + + if (s[i].ok()) { + jobject jvalue_bytebuf = 
env->GetObjectArrayElement(jvalues, i);
+      if (env->ExceptionCheck()) {
+        // ArrayIndexOutOfBoundsException is thrown
+        return;
+      }
+      jlong jvalue_capacity = env->GetDirectBufferCapacity(jvalue_bytebuf);
+      if (jvalue_capacity == -1) {
+        ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+            env,
+            "Invalid value(s) argument (argument is not a valid direct "
+            "ByteBuffer)");
+        return;
+      }
+      void* jvalue_address = env->GetDirectBufferAddress(jvalue_bytebuf);
+      if (jvalue_address == nullptr) {
+        ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+            env,
+            "Invalid value(s) argument (argument is not a valid direct "
+            "ByteBuffer)");
+        return;
+      }
+
+      // record the number of bytes returned, and push back that number,
+      // which may be bigger than the ByteBuffer supplied; then copy as much
+      // as fits into the ByteBuffer.
+      value_size.push_back(static_cast<jint>(values[i].size()));
+      auto copy_bytes =
+          std::min(static_cast<jlong>(values[i].size()), jvalue_capacity);
+      memcpy(jvalue_address, values[i].data(), copy_bytes);
+    } else {
+      // bad status for this key; report a zero-length value
+      value_size.push_back(0);
+    }
+  }
+
+  env->SetIntArrayRegion(jvalue_sizes, 0, num_keys, value_size.data());
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    multiGet
+ * Signature: (J[[B[I[I)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I(
+    JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
+    jintArray jkey_offs, jintArray jkey_lens) {
+  return multi_get_helper(
+      env, jdb, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+      ROCKSDB_NAMESPACE::ReadOptions(), jkeys, jkey_offs, jkey_lens, nullptr);
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    multiGet
+ * Signature: (J[[B[I[I[J)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3I_3I_3J(
+    JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys,
+    jintArray jkey_offs, jintArray jkey_lens,
+    jlongArray jcolumn_family_handles) {
+  return multi_get_helper(env, jdb,
+                          reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+                          ROCKSDB_NAMESPACE::ReadOptions(), jkeys, jkey_offs,
+                          jkey_lens, jcolumn_family_handles);
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    multiGet
+ * Signature: (JJ[[B[I[I)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I(
+    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens) {
+  return multi_get_helper(
+      env, jdb, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+      *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle), jkeys,
+      jkey_offs, jkey_lens, nullptr);
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    multiGet
+ * Signature: (JJ[[B[I[I[J)[[B
+ */
+jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3I_3I_3J(
+    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+    jobjectArray jkeys, jintArray jkey_offs, jintArray jkey_lens,
+    jlongArray jcolumn_family_handles) {
+  return multi_get_helper(
+      env, jdb, reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle),
+      *reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jropt_handle), jkeys,
+      jkey_offs, jkey_lens, jcolumn_family_handles);
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    multiGet
+ * Signature:
+ * (JJ[J[Ljava/nio/ByteBuffer;[I[I[Ljava/nio/ByteBuffer;[I[Lorg/rocksdb/Status;)V
+ */
+void Java_org_rocksdb_RocksDB_multiGet__JJ_3J_3Ljava_nio_ByteBuffer_2_3I_3I_3Ljava_nio_ByteBuffer_2_3I_3Lorg_rocksdb_Status_2(
+    JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle,
+    jlongArray jcolumn_family_handles, jobjectArray jkeys,
+    jintArray jkey_offsets, jintArray jkey_lengths, jobjectArray jvalues,
+    jintArray jvalues_sizes, jobjectArray jstatus_objects) {
+  return multi_get_helper_direct(
+      env, jdb,
reinterpret_cast(jdb_handle), + *reinterpret_cast(jropt_handle), + jcolumn_family_handles, jkeys, jkey_offsets, jkey_lengths, jvalues, + jvalues_sizes, jstatus_objects); +} +// private native void +// multiGet(final long dbHandle, final long rOptHandle, +// final long[] columnFamilyHandles, final ByteBuffer[] keysArray, +// final ByteBuffer[] valuesArray); + +////////////////////////////////////////////////////////////////////////////// +// ROCKSDB_NAMESPACE::DB::KeyMayExist +bool key_may_exist_helper(JNIEnv* env, jlong jdb_handle, jlong jcf_handle, + jlong jread_opts_handle, jbyteArray jkey, + jint jkey_offset, jint jkey_len, bool* has_exception, + std::string* value, bool* value_found) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + ROCKSDB_NAMESPACE::ReadOptions read_opts = + jread_opts_handle == 0 + ? ROCKSDB_NAMESPACE::ReadOptions() + : *(reinterpret_cast( + jread_opts_handle)); + + jbyte* key = new jbyte[jkey_len]; + env->GetByteArrayRegion(jkey, jkey_offset, jkey_len, key); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] key; + *has_exception = true; + return false; + } + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), jkey_len); + + const bool exists = + db->KeyMayExist(read_opts, cf_handle, key_slice, value, value_found); + + // cleanup + delete[] key; + + return exists; +} + +bool key_may_exist_direct_helper(JNIEnv* env, jlong jdb_handle, + jlong jcf_handle, jlong jread_opts_handle, + jobject jkey, jint jkey_offset, jint jkey_len, + bool* has_exception, std::string* value, + bool* value_found) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + ROCKSDB_NAMESPACE::ReadOptions read_opts = + jread_opts_handle == 0 + ? ROCKSDB_NAMESPACE::ReadOptions() + : *(reinterpret_cast( + jread_opts_handle)); + + char* key = reinterpret_cast(env->GetDirectBufferAddress(jkey)); + if (key == nullptr) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid key argument (argument is not a valid direct ByteBuffer)"); + *has_exception = true; + return false; + } + if (env->GetDirectBufferCapacity(jkey) < (jkey_offset + jkey_len)) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid key argument. 
Capacity is less than requested region (offset " + "+ length)."); + *has_exception = true; + return false; + } + + ROCKSDB_NAMESPACE::Slice key_slice(key, jkey_len); + + const bool exists = + db->KeyMayExist(read_opts, cf_handle, key_slice, value, value_found); + + return exists; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExist + * Signature: (JJJ[BII)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExist( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlong jread_opts_handle, jbyteArray jkey, jint jkey_offset, jint jkey_len) { + bool has_exception = false; + std::string value; + bool value_found = false; + + const bool exists = key_may_exist_helper( + env, jdb_handle, jcf_handle, jread_opts_handle, jkey, jkey_offset, + jkey_len, &has_exception, &value, &value_found); + + if (has_exception) { + // java exception already raised + return false; + } + + return static_cast(exists); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExistDirect + * Signature: (JJJLjava/nio/ByteBuffer;II)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExistDirect( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlong jread_opts_handle, jobject jkey, jint jkey_offset, jint jkey_len) { + bool has_exception = false; + std::string value; + bool value_found = false; + + const bool exists = key_may_exist_direct_helper( + env, jdb_handle, jcf_handle, jread_opts_handle, jkey, jkey_offset, + jkey_len, &has_exception, &value, &value_found); + if (has_exception) { + // java exception already raised + return false; + } + + return static_cast(exists); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExistDirectFoundValue + * Signature: + * (JJJLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;II)[J + */ +jintArray Java_org_rocksdb_RocksDB_keyMayExistDirectFoundValue( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlong jread_opts_handle, jobject jkey, jint jkey_offset, jint jkey_len, + jobject jval, jint jval_offset, jint jval_len) { + char* val_buffer = reinterpret_cast(env->GetDirectBufferAddress(jval)); + if (val_buffer == nullptr) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid value argument (argument is not a valid direct ByteBuffer)"); + return nullptr; + } + + if (env->GetDirectBufferCapacity(jval) < (jval_offset + jval_len)) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, + "Invalid value argument. Capacity is less than requested region " + "(offset + length)."); + return nullptr; + } + + bool has_exception = false; + std::string cvalue; + bool value_found = false; + + const bool exists = key_may_exist_direct_helper( + env, jdb_handle, jcf_handle, jread_opts_handle, jkey, jkey_offset, + jkey_len, &has_exception, &cvalue, &value_found); + + if (has_exception) { + // java exception already raised + return nullptr; + } + + const jint cvalue_len = static_cast(cvalue.size()); + const jint length = std::min(jval_len, cvalue_len); + memcpy(val_buffer + jval_offset, cvalue.c_str(), length); + + // keep consistent with java KeyMayExistEnum.values() + const int kNotExist = 0; + const int kExistsWithoutValue = 1; + const int kExistsWithValue = 2; + + // TODO fix return value/type + // exists/value_found/neither + // cvalue_len + jintArray jresult = env->NewIntArray(2); + const jint jexists = + exists ? (value_found ? 
kExistsWithValue : kExistsWithoutValue) + : kNotExist; + + env->SetIntArrayRegion(jresult, 0, 1, &jexists); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresult); + return nullptr; + } + env->SetIntArrayRegion(jresult, 1, 1, &cvalue_len); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresult); + return nullptr; + } + + return jresult; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExistFoundValue + * Signature: (JJJ[BII)[[B + */ +jobjectArray Java_org_rocksdb_RocksDB_keyMayExistFoundValue( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlong jread_opts_handle, jbyteArray jkey, jint jkey_offset, jint jkey_len) { + bool has_exception = false; + std::string value; + bool value_found = false; + + const bool exists = key_may_exist_helper( + env, jdb_handle, jcf_handle, jread_opts_handle, jkey, jkey_offset, + jkey_len, &has_exception, &value, &value_found); + + if (has_exception) { + // java exception already raised + return nullptr; + } + + jbyte result_flags[1]; + if (!exists) { + result_flags[0] = 0; + } else if (!value_found) { + result_flags[0] = 1; + } else { + // found + result_flags[0] = 2; + } + + jobjectArray jresults = ROCKSDB_NAMESPACE::ByteJni::new2dByteArray(env, 2); + if (jresults == nullptr) { + // exception occurred + return nullptr; + } + + // prepare the result flag + jbyteArray jresult_flags = env->NewByteArray(1); + if (jresult_flags == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion(jresult_flags, 0, 1, result_flags); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresult_flags); + return nullptr; + } + + env->SetObjectArrayElement(jresults, 0, jresult_flags); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresult_flags); + return nullptr; + } + + env->DeleteLocalRef(jresult_flags); + + if (result_flags[0] == 2) { + // set the value + const jsize jvalue_len = static_cast(value.size()); + jbyteArray jresult_value = env->NewByteArray(jvalue_len); + if (jresult_value == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jresult_value, 0, jvalue_len, + const_cast(reinterpret_cast(value.data()))); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresult_value); + return nullptr; + } + env->SetObjectArrayElement(jresults, 1, jresult_value); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresult_value); + return nullptr; + } + + env->DeleteLocalRef(jresult_value); + } + + return jresults; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iterator + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_iterator__J(JNIEnv*, jobject, jlong db_handle) { + auto* db = reinterpret_cast(db_handle); + return rocksdb_iterator_helper(db, ROCKSDB_NAMESPACE::ReadOptions(), nullptr); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iterator + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_RocksDB_iterator__JJ(JNIEnv*, jobject, jlong db_handle, + jlong jread_options_handle) { + auto* db = reinterpret_cast(db_handle); + auto& read_options = + *reinterpret_cast(jread_options_handle); + return rocksdb_iterator_helper(db, read_options, nullptr); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iteratorCF + * 
Signature: (JJ)J + */ +jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ(JNIEnv*, jobject, jlong db_handle, + jlong jcf_handle) { + auto* db = reinterpret_cast(db_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + return rocksdb_iterator_helper(db, ROCKSDB_NAMESPACE::ReadOptions(), + cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iteratorCF + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ(JNIEnv*, jobject, + jlong db_handle, + jlong jcf_handle, + jlong jread_options_handle) { + auto* db = reinterpret_cast(db_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + auto& read_options = + *reinterpret_cast(jread_options_handle); + return rocksdb_iterator_helper(db, read_options, cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iterators + * Signature: (J[JJ)[J + */ +jlongArray Java_org_rocksdb_RocksDB_iterators(JNIEnv* env, jobject, + jlong db_handle, + jlongArray jcolumn_family_handles, + jlong jread_options_handle) { + auto* db = reinterpret_cast(db_handle); + auto& read_options = + *reinterpret_cast(jread_options_handle); + + std::vector cf_handles; + if (jcolumn_family_handles != nullptr) { + const jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr); + if (jcfh == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + for (jsize i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); + } + + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); + } + + std::vector iterators; + ROCKSDB_NAMESPACE::Status s = + db->NewIterators(read_options, cf_handles, &iterators); + if (s.ok()) { + jlongArray jLongArray = + env->NewLongArray(static_cast(iterators.size())); + if (jLongArray == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + for (std::vector::size_type i = 0; + i < iterators.size(); i++) { + env->SetLongArrayRegion( + jLongArray, static_cast(i), 1, + const_cast(reinterpret_cast(&iterators[i]))); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jLongArray); + return nullptr; + } + } + + return jLongArray; + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } +} + +/* + * Method: getSnapshot + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_getSnapshot(JNIEnv*, jobject, jlong db_handle) { + auto* db = reinterpret_cast(db_handle); + const ROCKSDB_NAMESPACE::Snapshot* snapshot = db->GetSnapshot(); + return GET_CPLUSPLUS_POINTER(snapshot); +} + +/* + * Method: releaseSnapshot + * Signature: (JJ)V + */ +void Java_org_rocksdb_RocksDB_releaseSnapshot(JNIEnv*, jobject, jlong db_handle, + jlong snapshot_handle) { + auto* db = reinterpret_cast(db_handle); + auto* snapshot = + reinterpret_cast(snapshot_handle); + db->ReleaseSnapshot(snapshot); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getProperty + * Signature: (JJLjava/lang/String;I)Ljava/lang/String; + */ +jstring Java_org_rocksdb_RocksDB_getProperty(JNIEnv* env, jobject, + jlong jdb_handle, jlong jcf_handle, + jstring jproperty, + jint jproperty_len) { + const char* property = env->GetStringUTFChars(jproperty, nullptr); + if (property == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len); + + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* 
cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + std::string property_value; + bool retCode = db->GetProperty(cf_handle, property_name, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (retCode) { + return env->NewStringUTF(property_value.c_str()); + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::NotFound()); + return nullptr; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getMapProperty + * Signature: (JJLjava/lang/String;I)Ljava/util/Map; + */ +jobject Java_org_rocksdb_RocksDB_getMapProperty(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jcf_handle, + jstring jproperty, + jint jproperty_len) { + const char* property = env->GetStringUTFChars(jproperty, nullptr); + if (property == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len); + + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + std::map property_value; + bool retCode = db->GetMapProperty(cf_handle, property_name, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (retCode) { + return ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &property_value); + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::NotFound()); + return nullptr; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getLongProperty + * Signature: (JJLjava/lang/String;I)J + */ +jlong Java_org_rocksdb_RocksDB_getLongProperty(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jcf_handle, + jstring jproperty, + jint jproperty_len) { + const char* property = env->GetStringUTFChars(jproperty, nullptr); + if (property == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len); + + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + uint64_t property_value; + bool retCode = db->GetIntProperty(cf_handle, property_name, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (retCode) { + return property_value; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::NotFound()); + return 0; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: resetStats + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_resetStats(JNIEnv*, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + db->ResetStats(); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getAggregatedLongProperty + * Signature: (JLjava/lang/String;I)J + */ +jlong Java_org_rocksdb_RocksDB_getAggregatedLongProperty(JNIEnv* env, jobject, + jlong db_handle, + jstring jproperty, + jint jproperty_len) { + const char* property = env->GetStringUTFChars(jproperty, nullptr); + if (property == nullptr) { + return 0; + } + ROCKSDB_NAMESPACE::Slice property_name(property, jproperty_len); + auto* db = reinterpret_cast(db_handle); + uint64_t property_value = 0; + bool retCode = db->GetAggregatedIntProperty(property_name, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (retCode) { + return 
property_value; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::NotFound()); + return 0; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getApproximateSizes + * Signature: (JJ[JB)[J + */ +jlongArray Java_org_rocksdb_RocksDB_getApproximateSizes( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlongArray jrange_slice_handles, jbyte jinclude_flags) { + const jsize jlen = env->GetArrayLength(jrange_slice_handles); + const size_t range_count = jlen / 2; + + jlong* jranges = env->GetLongArrayElements(jrange_slice_handles, nullptr); + if (jranges == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + auto ranges = std::unique_ptr( + new ROCKSDB_NAMESPACE::Range[range_count]); + size_t range_offset = 0; + for (jsize i = 0; i < jlen; ++i) { + auto* start = reinterpret_cast(jranges[i]); + auto* limit = reinterpret_cast(jranges[++i]); + ranges.get()[range_offset++] = ROCKSDB_NAMESPACE::Range(*start, *limit); + } + + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + auto sizes = std::unique_ptr(new uint64_t[range_count]); + + ROCKSDB_NAMESPACE::DB::SizeApproximationFlags include_flags = + ROCKSDB_NAMESPACE::DB::SizeApproximationFlags::NONE; + if (jinclude_flags & 1) { + include_flags = + ROCKSDB_NAMESPACE::DB::SizeApproximationFlags::INCLUDE_MEMTABLES; + } + if (jinclude_flags & 2) { + include_flags = + (include_flags | + ROCKSDB_NAMESPACE::DB::SizeApproximationFlags::INCLUDE_FILES); + } + + db->GetApproximateSizes(cf_handle, ranges.get(), + static_cast(range_count), sizes.get(), + include_flags); + + // release LongArrayElements + env->ReleaseLongArrayElements(jrange_slice_handles, jranges, JNI_ABORT); + + // prepare results + auto results = std::unique_ptr(new jlong[range_count]); + for (size_t i = 0; i < range_count; ++i) { + results.get()[i] = static_cast(sizes.get()[i]); + } + + const jsize jrange_count = jlen / 2; + jlongArray jresults = env->NewLongArray(jrange_count); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetLongArrayRegion(jresults, 0, jrange_count, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresults); + return nullptr; + } + + return jresults; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getApproximateMemTableStats + * Signature: (JJJJ)[J + */ +jlongArray Java_org_rocksdb_RocksDB_getApproximateMemTableStats( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlong jstartHandle, jlong jlimitHandle) { + auto* start = reinterpret_cast(jstartHandle); + auto* limit = reinterpret_cast(jlimitHandle); + const ROCKSDB_NAMESPACE::Range range(*start, *limit); + + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + uint64_t count = 0; + uint64_t sizes = 0; + db->GetApproximateMemTableStats(cf_handle, range, &count, &sizes); + + // prepare results + jlong results[2] = {static_cast(count), static_cast(sizes)}; + + jlongArray jsizes = env->NewLongArray(2); + if (jsizes == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetLongArrayRegion(jsizes, 0, 2, results); + if (env->ExceptionCheck()) { 
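+    // (Editorial note, not part of the upstream patch: jsizes packs
+    // {count, sizes} for the Java caller; on a failed region write the
+    // local ref is dropped and nullptr is returned with the
+    // ArrayIndexOutOfBoundsException left pending.)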
+ // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jsizes); + return nullptr; + } + + return jsizes; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: compactRange + * Signature: (J[BI[BIJJ)V + */ +void Java_org_rocksdb_RocksDB_compactRange(JNIEnv* env, jobject, + jlong jdb_handle, jbyteArray jbegin, + jint jbegin_len, jbyteArray jend, + jint jend_len, + jlong jcompact_range_opts_handle, + jlong jcf_handle) { + jboolean has_exception = JNI_FALSE; + + std::string str_begin; + if (jbegin_len > 0) { + str_begin = ROCKSDB_NAMESPACE::JniUtil::byteString( + env, jbegin, jbegin_len, + [](const char* str, const size_t len) { return std::string(str, len); }, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + } + + std::string str_end; + if (jend_len > 0) { + str_end = ROCKSDB_NAMESPACE::JniUtil::byteString( + env, jend, jend_len, + [](const char* str, const size_t len) { return std::string(str, len); }, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + } + + ROCKSDB_NAMESPACE::CompactRangeOptions* compact_range_opts = nullptr; + if (jcompact_range_opts_handle == 0) { + // NOTE: we DO own the pointer! + compact_range_opts = new ROCKSDB_NAMESPACE::CompactRangeOptions(); + } else { + // NOTE: we do NOT own the pointer! + compact_range_opts = + reinterpret_cast( + jcompact_range_opts_handle); + } + + auto* db = reinterpret_cast(jdb_handle); + + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + ROCKSDB_NAMESPACE::Status s; + if (jbegin_len > 0 || jend_len > 0) { + const ROCKSDB_NAMESPACE::Slice begin(str_begin); + const ROCKSDB_NAMESPACE::Slice end(str_end); + s = db->CompactRange(*compact_range_opts, cf_handle, &begin, &end); + } else { + s = db->CompactRange(*compact_range_opts, cf_handle, nullptr, nullptr); + } + + if (jcompact_range_opts_handle == 0) { + delete compact_range_opts; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: setOptions + * Signature: (JJ[Ljava/lang/String;[Ljava/lang/String;)V + */ +void Java_org_rocksdb_RocksDB_setOptions(JNIEnv* env, jobject, jlong jdb_handle, + jlong jcf_handle, jobjectArray jkeys, + jobjectArray jvalues) { + const jsize len = env->GetArrayLength(jkeys); + assert(len == env->GetArrayLength(jvalues)); + + std::unordered_map options_map; + for (jsize i = 0; i < len; i++) { + jobject jobj_key = env->GetObjectArrayElement(jkeys, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return; + } + + jobject jobj_value = env->GetObjectArrayElement(jvalues, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jobj_key); + return; + } + + jboolean has_exception = JNI_FALSE; + std::string s_key = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, reinterpret_cast(jobj_key), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + std::string s_value = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, reinterpret_cast(jobj_value), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + options_map[s_key] = s_value; + + env->DeleteLocalRef(jobj_key); + 
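// Drop the per-iteration local refs eagerly so that long option arrays
+    // cannot exhaust the JNI local reference table (editorial comment, not
+    // part of the upstream patch).
+    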
env->DeleteLocalRef(jobj_value); + } + + auto* db = reinterpret_cast(jdb_handle); + auto* cf_handle = + reinterpret_cast(jcf_handle); + if (cf_handle == nullptr) { + cf_handle = db->DefaultColumnFamily(); + } + auto s = db->SetOptions(cf_handle, options_map); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: setDBOptions + * Signature: (J[Ljava/lang/String;[Ljava/lang/String;)V + */ +void Java_org_rocksdb_RocksDB_setDBOptions(JNIEnv* env, jobject, + jlong jdb_handle, jobjectArray jkeys, + jobjectArray jvalues) { + const jsize len = env->GetArrayLength(jkeys); + assert(len == env->GetArrayLength(jvalues)); + + std::unordered_map options_map; + for (jsize i = 0; i < len; i++) { + jobject jobj_key = env->GetObjectArrayElement(jkeys, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return; + } + + jobject jobj_value = env->GetObjectArrayElement(jvalues, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jobj_key); + return; + } + + jboolean has_exception = JNI_FALSE; + std::string s_key = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, reinterpret_cast(jobj_key), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + std::string s_value = ROCKSDB_NAMESPACE::JniUtil::copyStdString( + env, reinterpret_cast(jobj_value), &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + env->DeleteLocalRef(jobj_value); + env->DeleteLocalRef(jobj_key); + return; + } + + options_map[s_key] = s_value; + + env->DeleteLocalRef(jobj_key); + env->DeleteLocalRef(jobj_value); + } + + auto* db = reinterpret_cast(jdb_handle); + auto s = db->SetDBOptions(options_map); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getOptions + * Signature: (JJ)Ljava/lang/String; + */ +jstring Java_org_rocksdb_RocksDB_getOptions(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + auto options = db->GetOptions(cf_handle); + std::string options_as_string; + ROCKSDB_NAMESPACE::Status s = + GetStringFromColumnFamilyOptions(&options_as_string, options); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + return env->NewStringUTF(options_as_string.c_str()); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getDBOptions + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_RocksDB_getDBOptions(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + + auto options = db->GetDBOptions(); + std::string options_as_string; + ROCKSDB_NAMESPACE::Status s = + GetStringFromDBOptions(&options_as_string, options); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + return env->NewStringUTF(options_as_string.c_str()); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: compactFiles + * Signature: (JJJ[Ljava/lang/String;IIJ)[Ljava/lang/String; + */ +jobjectArray Java_org_rocksdb_RocksDB_compactFiles( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcompaction_opts_handle, + jlong 
jcf_handle, jobjectArray jinput_file_names, jint joutput_level, + jint joutput_path_id, jlong jcompaction_job_info_handle) { + jboolean has_exception = JNI_FALSE; + const std::vector input_file_names = + ROCKSDB_NAMESPACE::JniUtil::copyStrings(env, jinput_file_names, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return nullptr; + } + + auto* compaction_opts = + reinterpret_cast( + jcompaction_opts_handle); + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + + ROCKSDB_NAMESPACE::CompactionJobInfo* compaction_job_info = nullptr; + if (jcompaction_job_info_handle != 0) { + compaction_job_info = + reinterpret_cast( + jcompaction_job_info_handle); + } + + std::vector output_file_names; + auto s = db->CompactFiles(*compaction_opts, cf_handle, input_file_names, + static_cast(joutput_level), + static_cast(joutput_path_id), + &output_file_names, compaction_job_info); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings(env, &output_file_names); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: cancelAllBackgroundWork + * Signature: (JZ)V + */ +void Java_org_rocksdb_RocksDB_cancelAllBackgroundWork(JNIEnv*, jobject, + jlong jdb_handle, + jboolean jwait) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::CancelAllBackgroundWork(db, jwait); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: pauseBackgroundWork + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_pauseBackgroundWork(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->PauseBackgroundWork(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: continueBackgroundWork + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_continueBackgroundWork(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->ContinueBackgroundWork(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: enableAutoCompaction + * Signature: (J[J)V + */ +void Java_org_rocksdb_RocksDB_enableAutoCompaction(JNIEnv* env, jobject, + jlong jdb_handle, + jlongArray jcf_handles) { + auto* db = reinterpret_cast(jdb_handle); + jboolean has_exception = JNI_FALSE; + const std::vector cf_handles = + ROCKSDB_NAMESPACE::JniUtil::fromJPointers< + ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, jcf_handles, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + db->EnableAutoCompaction(cf_handles); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: numberLevels + * Signature: (JJ)I + */ +jint Java_org_rocksdb_RocksDB_numberLevels(JNIEnv*, jobject, jlong jdb_handle, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + return static_cast(db->NumberLevels(cf_handle)); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: maxMemCompactionLevel + * Signature: (JJ)I + */ +jint Java_org_rocksdb_RocksDB_maxMemCompactionLevel(JNIEnv*, jobject, + jlong jdb_handle, + jlong jcf_handle) { + auto* db = 
reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + return static_cast(db->MaxMemCompactionLevel(cf_handle)); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: level0StopWriteTrigger + * Signature: (JJ)I + */ +jint Java_org_rocksdb_RocksDB_level0StopWriteTrigger(JNIEnv*, jobject, + jlong jdb_handle, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + return static_cast(db->Level0StopWriteTrigger(cf_handle)); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getName + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_RocksDB_getName(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + std::string name = db->GetName(); + return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, false); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getEnv + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_getEnv(JNIEnv*, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + return GET_CPLUSPLUS_POINTER(db->GetEnv()); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: flush + * Signature: (JJ[J)V + */ +void Java_org_rocksdb_RocksDB_flush(JNIEnv* env, jobject, jlong jdb_handle, + jlong jflush_opts_handle, + jlongArray jcf_handles) { + auto* db = reinterpret_cast(jdb_handle); + auto* flush_opts = + reinterpret_cast(jflush_opts_handle); + std::vector cf_handles; + if (jcf_handles == nullptr) { + cf_handles.push_back(db->DefaultColumnFamily()); + } else { + jboolean has_exception = JNI_FALSE; + cf_handles = ROCKSDB_NAMESPACE::JniUtil::fromJPointers< + ROCKSDB_NAMESPACE::ColumnFamilyHandle>(env, jcf_handles, + &has_exception); + if (has_exception) { + // exception occurred + return; + } + } + auto s = db->Flush(*flush_opts, cf_handles); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: flushWal + * Signature: (JZ)V + */ +void Java_org_rocksdb_RocksDB_flushWal(JNIEnv* env, jobject, jlong jdb_handle, + jboolean jsync) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->FlushWAL(jsync == JNI_TRUE); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: syncWal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_syncWal(JNIEnv* env, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->SyncWAL(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getLatestSequenceNumber + * Signature: (J)V + */ +jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber(JNIEnv*, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + return db->GetLatestSequenceNumber(); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: disableFileDeletions + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_disableFileDeletions(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::Status s = db->DisableFileDeletions(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: 
enableFileDeletions + * Signature: (JZ)V + */ +void Java_org_rocksdb_RocksDB_enableFileDeletions(JNIEnv* env, jobject, + jlong jdb_handle, + jboolean jforce) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::Status s = db->EnableFileDeletions(jforce); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getLiveFiles + * Signature: (JZ)[Ljava/lang/String; + */ +jobjectArray Java_org_rocksdb_RocksDB_getLiveFiles(JNIEnv* env, jobject, + jlong jdb_handle, + jboolean jflush_memtable) { + auto* db = reinterpret_cast(jdb_handle); + std::vector live_files; + uint64_t manifest_file_size = 0; + auto s = db->GetLiveFiles(live_files, &manifest_file_size, + jflush_memtable == JNI_TRUE); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + // append the manifest_file_size to the vector + // for passing back to java + live_files.push_back(std::to_string(manifest_file_size)); + + return ROCKSDB_NAMESPACE::JniUtil::toJavaStrings(env, &live_files); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getSortedWalFiles + * Signature: (J)[Lorg/rocksdb/LogFile; + */ +jobjectArray Java_org_rocksdb_RocksDB_getSortedWalFiles(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + std::vector> sorted_wal_files; + auto s = db->GetSortedWalFiles(sorted_wal_files); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + // convert to Java type + const jsize jlen = static_cast(sorted_wal_files.size()); + jobjectArray jsorted_wal_files = env->NewObjectArray( + jlen, ROCKSDB_NAMESPACE::LogFileJni::getJClass(env), nullptr); + if (jsorted_wal_files == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jsize i = 0; + for (auto it = sorted_wal_files.begin(); it != sorted_wal_files.end(); ++it) { + jobject jlog_file = + ROCKSDB_NAMESPACE::LogFileJni::fromCppLogFile(env, it->get()); + if (jlog_file == nullptr) { + // exception occurred + env->DeleteLocalRef(jsorted_wal_files); + return nullptr; + } + + env->SetObjectArrayElement(jsorted_wal_files, i++, jlog_file); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(jlog_file); + env->DeleteLocalRef(jsorted_wal_files); + return nullptr; + } + + env->DeleteLocalRef(jlog_file); + } + + return jsorted_wal_files; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getUpdatesSince + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_RocksDB_getUpdatesSince(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jsequence_number) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::SequenceNumber sequence_number = + static_cast(jsequence_number); + std::unique_ptr iter; + ROCKSDB_NAMESPACE::Status s = db->GetUpdatesSince(sequence_number, &iter); + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(iter.release()); + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return 0; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: deleteFile + * Signature: (JLjava/lang/String;)V + */ +void Java_org_rocksdb_RocksDB_deleteFile(JNIEnv* env, jobject, jlong jdb_handle, + jstring jname) { + auto* db = reinterpret_cast(jdb_handle); + jboolean has_exception = JNI_FALSE; + std::string name = + ROCKSDB_NAMESPACE::JniUtil::copyStdString(env, jname, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + db->DeleteFile(name); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: 
getLiveFilesMetaData + * Signature: (J)[Lorg/rocksdb/LiveFileMetaData; + */ +jobjectArray Java_org_rocksdb_RocksDB_getLiveFilesMetaData(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + std::vector live_files_meta_data; + db->GetLiveFilesMetaData(&live_files_meta_data); + + // convert to Java type + const jsize jlen = static_cast(live_files_meta_data.size()); + jobjectArray jlive_files_meta_data = env->NewObjectArray( + jlen, ROCKSDB_NAMESPACE::LiveFileMetaDataJni::getJClass(env), nullptr); + if (jlive_files_meta_data == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + jsize i = 0; + for (auto it = live_files_meta_data.begin(); it != live_files_meta_data.end(); + ++it) { + jobject jlive_file_meta_data = + ROCKSDB_NAMESPACE::LiveFileMetaDataJni::fromCppLiveFileMetaData(env, + &(*it)); + if (jlive_file_meta_data == nullptr) { + // exception occurred + env->DeleteLocalRef(jlive_files_meta_data); + return nullptr; + } + + env->SetObjectArrayElement(jlive_files_meta_data, i++, + jlive_file_meta_data); + if (env->ExceptionCheck()) { + // exception occurred + env->DeleteLocalRef(jlive_file_meta_data); + env->DeleteLocalRef(jlive_files_meta_data); + return nullptr; + } + + env->DeleteLocalRef(jlive_file_meta_data); + } + + return jlive_files_meta_data; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getColumnFamilyMetaData + * Signature: (JJ)Lorg/rocksdb/ColumnFamilyMetaData; + */ +jobject Java_org_rocksdb_RocksDB_getColumnFamilyMetaData(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + ROCKSDB_NAMESPACE::ColumnFamilyMetaData cf_metadata; + db->GetColumnFamilyMetaData(cf_handle, &cf_metadata); + return ROCKSDB_NAMESPACE::ColumnFamilyMetaDataJni:: + fromCppColumnFamilyMetaData(env, &cf_metadata); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: ingestExternalFile + * Signature: (JJ[Ljava/lang/String;IJ)V + */ +void Java_org_rocksdb_RocksDB_ingestExternalFile( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jobjectArray jfile_path_list, jint jfile_path_list_len, + jlong jingest_external_file_options_handle) { + jboolean has_exception = JNI_FALSE; + std::vector file_path_list = + ROCKSDB_NAMESPACE::JniUtil::copyStrings( + env, jfile_path_list, jfile_path_list_len, &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return; + } + + auto* db = reinterpret_cast(jdb_handle); + auto* column_family = + reinterpret_cast(jcf_handle); + auto* ifo = reinterpret_cast( + jingest_external_file_options_handle); + ROCKSDB_NAMESPACE::Status s = + db->IngestExternalFile(column_family, file_path_list, *ifo); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: verifyChecksum + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_verifyChecksum(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->VerifyChecksum(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getDefaultColumnFamily + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily(JNIEnv*, jobject, + jlong jdb_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto* 
cf_handle = db_handle->DefaultColumnFamily(); + return GET_CPLUSPLUS_POINTER(cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getPropertiesOfAllTables + * Signature: (JJ)Ljava/util/Map; + */ +jobject Java_org_rocksdb_RocksDB_getPropertiesOfAllTables(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + ROCKSDB_NAMESPACE::TablePropertiesCollection table_properties_collection; + auto s = + db->GetPropertiesOfAllTables(cf_handle, &table_properties_collection); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } + + // convert to Java type + jobject jhash_map = ROCKSDB_NAMESPACE::HashMapJni::construct( + env, static_cast(table_properties_collection.size())); + if (jhash_map == nullptr) { + // exception occurred + return nullptr; + } + + const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV< + const std::string, + const std::shared_ptr, jobject, + jobject> + fn_map_kv = + [env](const std::pair>& + kv) { + jstring jkey = ROCKSDB_NAMESPACE::JniUtil::toJavaString( + env, &(kv.first), false); + if (env->ExceptionCheck()) { + // an error occurred + return std::unique_ptr>(nullptr); + } + + jobject jtable_properties = + ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties( + env, *(kv.second.get())); + if (jtable_properties == nullptr) { + // an error occurred + env->DeleteLocalRef(jkey); + return std::unique_ptr>(nullptr); + } + + return std::unique_ptr>( + new std::pair( + static_cast(jkey), + static_cast(jtable_properties))); + }; + + if (!ROCKSDB_NAMESPACE::HashMapJni::putAll( + env, jhash_map, table_properties_collection.begin(), + table_properties_collection.end(), fn_map_kv)) { + // exception occurred + return nullptr; + } + + return jhash_map; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getPropertiesOfTablesInRange + * Signature: (JJ[J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_RocksDB_getPropertiesOfTablesInRange( + JNIEnv* env, jobject, jlong jdb_handle, jlong jcf_handle, + jlongArray jrange_slice_handles) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + const jsize jlen = env->GetArrayLength(jrange_slice_handles); + jlong* jrange_slice_handle = + env->GetLongArrayElements(jrange_slice_handles, nullptr); + if (jrange_slice_handle == nullptr) { + // exception occurred + return nullptr; + } + + const size_t ranges_len = static_cast(jlen / 2); + auto ranges = std::unique_ptr( + new ROCKSDB_NAMESPACE::Range[ranges_len]); + for (jsize i = 0, j = 0; i < jlen; ++i) { + auto* start = + reinterpret_cast(jrange_slice_handle[i]); + auto* limit = + reinterpret_cast(jrange_slice_handle[++i]); + ranges[j++] = ROCKSDB_NAMESPACE::Range(*start, *limit); + } + + ROCKSDB_NAMESPACE::TablePropertiesCollection table_properties_collection; + auto s = db->GetPropertiesOfTablesInRange(cf_handle, ranges.get(), ranges_len, + &table_properties_collection); + if (!s.ok()) { + // error occurred + env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle, + JNI_ABORT); + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + // cleanup + env->ReleaseLongArrayElements(jrange_slice_handles, jrange_slice_handle, + 
JNI_ABORT); + + return jrange_slice_handles; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: suggestCompactRange + * Signature: (JJ)[J + */ +jlongArray Java_org_rocksdb_RocksDB_suggestCompactRange(JNIEnv* env, jobject, + jlong jdb_handle, + jlong jcf_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + auto* begin = new ROCKSDB_NAMESPACE::Slice(); + auto* end = new ROCKSDB_NAMESPACE::Slice(); + auto s = db->SuggestCompactRange(cf_handle, begin, end); + if (!s.ok()) { + // error occurred + delete begin; + delete end; + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } + + jlongArray jslice_handles = env->NewLongArray(2); + if (jslice_handles == nullptr) { + // exception thrown: OutOfMemoryError + delete begin; + delete end; + return nullptr; + } + + jlong slice_handles[2]; + slice_handles[0] = GET_CPLUSPLUS_POINTER(begin); + slice_handles[1] = GET_CPLUSPLUS_POINTER(end); + env->SetLongArrayRegion(jslice_handles, 0, 2, slice_handles); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete begin; + delete end; + env->DeleteLocalRef(jslice_handles); + return nullptr; + } + + return jslice_handles; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: promoteL0 + * Signature: (JJI)V + */ +void Java_org_rocksdb_RocksDB_promoteL0(JNIEnv*, jobject, jlong jdb_handle, + jlong jcf_handle, jint jtarget_level) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* cf_handle; + if (jcf_handle == 0) { + cf_handle = db->DefaultColumnFamily(); + } else { + cf_handle = + reinterpret_cast(jcf_handle); + } + db->PromoteL0(cf_handle, static_cast(jtarget_level)); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: startTrace + * Signature: (JJJ)V + */ +void Java_org_rocksdb_RocksDB_startTrace( + JNIEnv* env, jobject, jlong jdb_handle, jlong jmax_trace_file_size, + jlong jtrace_writer_jnicallback_handle) { + auto* db = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::TraceOptions trace_options; + trace_options.max_trace_file_size = + static_cast(jmax_trace_file_size); + // transfer ownership of trace writer from Java to C++ + auto trace_writer = + std::unique_ptr( + reinterpret_cast( + jtrace_writer_jnicallback_handle)); + auto s = db->StartTrace(trace_options, std::move(trace_writer)); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: endTrace + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_endTrace(JNIEnv* env, jobject, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->EndTrace(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: tryCatchUpWithPrimary + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_tryCatchUpWithPrimary(JNIEnv* env, jobject, + jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->TryCatchUpWithPrimary(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: destroyDB + * Signature: (Ljava/lang/String;J)V + */ +void Java_org_rocksdb_RocksDB_destroyDB(JNIEnv* env, jclass, jstring jdb_path, + jlong joptions_handle) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { 
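
DestroyDB, which the entry point beginning above wraps, irreversibly removes all files of the database at the given path. A minimal direct usage sketch (illustrative only, not part of the patch; the path is hypothetical):

#include <cstdio>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

// Sketch: destroy a database and check the Status, as the JNI layer does
// before converting a failure into a Java RocksDBException.
void DestroyExample() {
  const ROCKSDB_NAMESPACE::Options options;
  const ROCKSDB_NAMESPACE::Status s =
      ROCKSDB_NAMESPACE::DestroyDB("/tmp/rocksdb-example", options);
  if (!s.ok()) {
    std::fprintf(stderr, "DestroyDB failed: %s\n", s.ToString().c_str());
  }
}
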
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+
+  auto* options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(joptions_handle);
+  if (options == nullptr) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(
+        env, ROCKSDB_NAMESPACE::Status::InvalidArgument("Invalid Options."));
+  }
+
+  ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DestroyDB(db_path, *options);
+  env->ReleaseStringUTFChars(jdb_path, db_path);
+
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+bool get_slice_helper(JNIEnv* env, jobjectArray ranges, jsize index,
+                      std::unique_ptr<ROCKSDB_NAMESPACE::Slice>& slice,
+                      std::vector<std::unique_ptr<jbyte[]>>& ranges_to_free) {
+  jobject jArray = env->GetObjectArrayElement(ranges, index);
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    return false;
+  }
+
+  if (jArray == nullptr) {
+    return true;
+  }
+
+  jbyteArray jba = reinterpret_cast<jbyteArray>(jArray);
+  jsize len_ba = env->GetArrayLength(jba);
+  ranges_to_free.push_back(std::unique_ptr<jbyte[]>(new jbyte[len_ba]));
+  env->GetByteArrayRegion(jba, 0, len_ba, ranges_to_free.back().get());
+  if (env->ExceptionCheck()) {
+    // exception thrown: ArrayIndexOutOfBoundsException
+    env->DeleteLocalRef(jArray);
+    return false;
+  }
+  env->DeleteLocalRef(jArray);
+  slice.reset(new ROCKSDB_NAMESPACE::Slice(
+      reinterpret_cast<char*>(ranges_to_free.back().get()), len_ba));
+  return true;
+}
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    deleteFilesInRanges
+ * Signature: (JJLjava/util/List;Z)V
+ */
+void Java_org_rocksdb_RocksDB_deleteFilesInRanges(JNIEnv* env, jobject /*jdb*/,
+                                                  jlong jdb_handle,
+                                                  jlong jcf_handle,
+                                                  jobjectArray ranges,
+                                                  jboolean include_end) {
+  jsize length = env->GetArrayLength(ranges);
+
+  std::vector<ROCKSDB_NAMESPACE::RangePtr> rangesVector;
+  std::vector<std::unique_ptr<ROCKSDB_NAMESPACE::Slice>> slices;
+  std::vector<std::unique_ptr<jbyte[]>> ranges_to_free;
+  for (jsize i = 0; (i + 1) < length; i += 2) {
+    slices.push_back(std::unique_ptr<ROCKSDB_NAMESPACE::Slice>());
+    if (!get_slice_helper(env, ranges, i, slices.back(), ranges_to_free)) {
+      // exception thrown
+      return;
+    }
+
+    slices.push_back(std::unique_ptr<ROCKSDB_NAMESPACE::Slice>());
+    if (!get_slice_helper(env, ranges, i + 1, slices.back(), ranges_to_free)) {
+      // exception thrown
+      return;
+    }
+
+    rangesVector.push_back(ROCKSDB_NAMESPACE::RangePtr(
+        slices[slices.size() - 2].get(), slices[slices.size() - 1].get()));
+  }
+
+  auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle);
+  auto* column_family =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle);
+
+  ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DeleteFilesInRanges(
+      db, column_family == nullptr ? db->DefaultColumnFamily() : column_family,
+      rangesVector.data(), rangesVector.size(), include_end);
+
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class:     org_rocksdb_RocksDB
+ * Method:    version
+ * Signature: ()I
+ */
+jint Java_org_rocksdb_RocksDB_version(JNIEnv*, jclass) {
+  uint32_t encodedVersion = (ROCKSDB_MAJOR & 0xff) << 16;
+  encodedVersion |= (ROCKSDB_MINOR & 0xff) << 8;
+  encodedVersion |= (ROCKSDB_PATCH & 0xff);
+  return static_cast<jint>(encodedVersion);
+}
diff --git a/src/rocksdb/java/rocksjni/slice.cc b/src/rocksdb/java/rocksjni/slice.cc
new file mode 100644
index 000000000..63c6b1b9f
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/slice.cc
@@ -0,0 +1,374 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Slice.
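
Before the Slice bridge below, one semantic point is worth keeping in mind: a Slice is a non-owning (pointer, length) view. The constructors in this file therefore copy the Java byte[] into a heap buffer, and disposeInternalBuf must later delete[] that buffer. A minimal sketch of the view semantics (standalone, not part of the patch):

#include <string>

#include "rocksdb/slice.h"

// Sketch: a Slice merely aliases caller-managed bytes; it owns nothing.
void SliceViewExample() {
  std::string backing = "hello";
  const ROCKSDB_NAMESPACE::Slice s(backing.data(), backing.size());
  backing[0] = 'H';
  // s now reads "Hello": the Slice sees the mutation because it aliases
  // `backing`, and it must not outlive `backing`.
}
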
+ +#include "rocksdb/slice.h" + +#include +#include +#include + +#include + +#include "include/org_rocksdb_AbstractSlice.h" +#include "include/org_rocksdb_DirectSlice.h" +#include "include/org_rocksdb_Slice.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +// + +/* + * Class: org_rocksdb_Slice + * Method: createNewSlice0 + * Signature: ([BI)J + */ +jlong Java_org_rocksdb_Slice_createNewSlice0(JNIEnv* env, jclass /*jcls*/, + jbyteArray data, jint offset) { + const jsize dataSize = env->GetArrayLength(data); + const int len = dataSize - offset; + + // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf + // method + jbyte* buf = new jbyte[len]; + env->GetByteArrayRegion(data, offset, len, buf); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return 0; + } + + const auto* slice = new ROCKSDB_NAMESPACE::Slice((const char*)buf, len); + return GET_CPLUSPLUS_POINTER(slice); +} + +/* + * Class: org_rocksdb_Slice + * Method: createNewSlice1 + * Signature: ([B)J + */ +jlong Java_org_rocksdb_Slice_createNewSlice1(JNIEnv* env, jclass /*jcls*/, + jbyteArray data) { + jbyte* ptrData = env->GetByteArrayElements(data, nullptr); + if (ptrData == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + const int len = env->GetArrayLength(data) + 1; + + // NOTE: buf will be deleted in the Java_org_rocksdb_Slice_disposeInternalBuf + // method + char* buf = new char[len]; + memcpy(buf, ptrData, len - 1); + buf[len - 1] = '\0'; + + const auto* slice = new ROCKSDB_NAMESPACE::Slice(buf, len - 1); + + env->ReleaseByteArrayElements(data, ptrData, JNI_ABORT); + + return GET_CPLUSPLUS_POINTER(slice); +} + +/* + * Class: org_rocksdb_Slice + * Method: data0 + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_Slice_data0(JNIEnv* env, jobject /*jobj*/, + jlong handle) { + const auto* slice = reinterpret_cast(handle); + const jsize len = static_cast(slice->size()); + const jbyteArray data = env->NewByteArray(len); + if (data == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetByteArrayRegion( + data, 0, len, + const_cast(reinterpret_cast(slice->data()))); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(data); + return nullptr; + } + + return data; +} + +/* + * Class: org_rocksdb_Slice + * Method: clear0 + * Signature: (JZJ)V + */ +void Java_org_rocksdb_Slice_clear0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle, jboolean shouldRelease, + jlong internalBufferOffset) { + auto* slice = reinterpret_cast(handle); + if (shouldRelease == JNI_TRUE) { + const char* buf = slice->data_ - internalBufferOffset; + delete[] buf; + } + slice->clear(); +} + +/* + * Class: org_rocksdb_Slice + * Method: removePrefix0 + * Signature: (JI)V + */ +void Java_org_rocksdb_Slice_removePrefix0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle, jint length) { + auto* slice = reinterpret_cast(handle); + slice->remove_prefix(length); +} + +/* + * Class: org_rocksdb_DirectSlice + * Method: setLength0 + * Signature: (JI)V + */ +void Java_org_rocksdb_DirectSlice_setLength0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle, jint length) { + auto* slice = reinterpret_cast(handle); + slice->size_ = length; +} + +/* + * Class: org_rocksdb_Slice + * Method: disposeInternalBuf + * Signature: (JJ)V + */ +void Java_org_rocksdb_Slice_disposeInternalBuf(JNIEnv* /*env*/, + jobject /*jobj*/, jlong handle, + jlong internalBufferOffset) { + const auto* slice = 
reinterpret_cast(handle); + const char* buf = slice->data_ - internalBufferOffset; + delete[] buf; +} + +// + +// (data_addr); + const auto* slice = new ROCKSDB_NAMESPACE::Slice(ptrData, length); + return GET_CPLUSPLUS_POINTER(slice); +} + +/* + * Class: org_rocksdb_DirectSlice + * Method: createNewDirectSlice1 + * Signature: (Ljava/nio/ByteBuffer;)J + */ +jlong Java_org_rocksdb_DirectSlice_createNewDirectSlice1(JNIEnv* env, + jclass /*jcls*/, + jobject data) { + void* data_addr = env->GetDirectBufferAddress(data); + if (data_addr == nullptr) { + // error: memory region is undefined, given object is not a direct + // java.nio.Buffer, or JNI access to direct buffers is not supported by JVM + ROCKSDB_NAMESPACE::IllegalArgumentExceptionJni::ThrowNew( + env, ROCKSDB_NAMESPACE::Status::InvalidArgument( + "Could not access DirectBuffer")); + return 0; + } + + const auto* ptrData = reinterpret_cast(data_addr); + const auto* slice = new ROCKSDB_NAMESPACE::Slice(ptrData); + return GET_CPLUSPLUS_POINTER(slice); +} + +/* + * Class: org_rocksdb_DirectSlice + * Method: data0 + * Signature: (J)Ljava/lang/Object; + */ +jobject Java_org_rocksdb_DirectSlice_data0(JNIEnv* env, jobject /*jobj*/, + jlong handle) { + const auto* slice = reinterpret_cast(handle); + return env->NewDirectByteBuffer(const_cast(slice->data()), + slice->size()); +} + +/* + * Class: org_rocksdb_DirectSlice + * Method: get0 + * Signature: (JI)B + */ +jbyte Java_org_rocksdb_DirectSlice_get0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle, jint offset) { + const auto* slice = reinterpret_cast(handle); + return (*slice)[offset]; +} + +/* + * Class: org_rocksdb_DirectSlice + * Method: clear0 + * Signature: (JZJ)V + */ +void Java_org_rocksdb_DirectSlice_clear0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle, jboolean shouldRelease, + jlong internalBufferOffset) { + auto* slice = reinterpret_cast(handle); + if (shouldRelease == JNI_TRUE) { + const char* buf = slice->data_ - internalBufferOffset; + delete[] buf; + } + slice->clear(); +} + +/* + * Class: org_rocksdb_DirectSlice + * Method: removePrefix0 + * Signature: (JI)V + */ +void Java_org_rocksdb_DirectSlice_removePrefix0(JNIEnv* /*env*/, + jobject /*jobj*/, jlong handle, + jint length) { + auto* slice = reinterpret_cast(handle); + slice->remove_prefix(length); +} + +/* + * Class: org_rocksdb_DirectSlice + * Method: disposeInternalBuf + * Signature: (JJ)V + */ +void Java_org_rocksdb_DirectSlice_disposeInternalBuf( + JNIEnv* /*env*/, jobject /*jobj*/, jlong handle, + jlong internalBufferOffset) { + const auto* slice = reinterpret_cast(handle); + const char* buf = slice->data_ - internalBufferOffset; + delete[] buf; +} + +// diff --git a/src/rocksdb/java/rocksjni/snapshot.cc b/src/rocksdb/java/rocksjni/snapshot.cc new file mode 100644 index 000000000..2a1265a58 --- /dev/null +++ b/src/rocksdb/java/rocksjni/snapshot.cc @@ -0,0 +1,27 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++. 
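
The snapshot bridge below exposes only GetSequenceNumber; a snapshot pins the write sequence that was current when it was taken, and reads through it ignore later writes. Roughly, on the C++ side (a minimal sketch, error handling elided; not part of the patch):

#include <cassert>
#include <string>

#include "rocksdb/db.h"

// Sketch: reads through a snapshot see only writes at or below its
// pinned sequence number.
void SnapshotExample(ROCKSDB_NAMESPACE::DB* db) {
  const ROCKSDB_NAMESPACE::Snapshot* snap = db->GetSnapshot();
  assert(snap->GetSequenceNumber() <= db->GetLatestSequenceNumber());

  ROCKSDB_NAMESPACE::ReadOptions read_opts;
  read_opts.snapshot = snap;  // later writes are invisible to this read
  std::string value;
  ROCKSDB_NAMESPACE::Status s = db->Get(read_opts, "key", &value);
  (void)s;

  db->ReleaseSnapshot(snap);  // release, never delete, a snapshot
}
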
+ +#include +#include +#include + +#include "include/org_rocksdb_Snapshot.h" +#include "rocksdb/db.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_Snapshot + * Method: getSequenceNumber + * Signature: (J)J + */ +jlong Java_org_rocksdb_Snapshot_getSequenceNumber(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jsnapshot_handle) { + auto* snapshot = + reinterpret_cast(jsnapshot_handle); + return snapshot->GetSequenceNumber(); +} diff --git a/src/rocksdb/java/rocksjni/sst_file_manager.cc b/src/rocksdb/java/rocksjni/sst_file_manager.cc new file mode 100644 index 000000000..c51436819 --- /dev/null +++ b/src/rocksdb/java/rocksjni/sst_file_manager.cc @@ -0,0 +1,250 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling C++ ROCKSDB_NAMESPACE::SstFileManager methods +// from Java side. + +#include "rocksdb/sst_file_manager.h" + +#include + +#include + +#include "include/org_rocksdb_SstFileManager.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_SstFileManager + * Method: newSstFileManager + * Signature: (JJJDJ)J + */ +jlong Java_org_rocksdb_SstFileManager_newSstFileManager( + JNIEnv* jnienv, jclass /*jcls*/, jlong jenv_handle, jlong jlogger_handle, + jlong jrate_bytes, jdouble jmax_trash_db_ratio, + jlong jmax_delete_chunk_bytes) { + auto* env = reinterpret_cast(jenv_handle); + ROCKSDB_NAMESPACE::Status s; + ROCKSDB_NAMESPACE::SstFileManager* sst_file_manager = nullptr; + + if (jlogger_handle != 0) { + auto* sptr_logger = + reinterpret_cast*>( + jlogger_handle); + sst_file_manager = ROCKSDB_NAMESPACE::NewSstFileManager( + env, *sptr_logger, "", jrate_bytes, true, &s, jmax_trash_db_ratio, + jmax_delete_chunk_bytes); + } else { + sst_file_manager = ROCKSDB_NAMESPACE::NewSstFileManager( + env, nullptr, "", jrate_bytes, true, &s, jmax_trash_db_ratio, + jmax_delete_chunk_bytes); + } + + if (!s.ok()) { + if (sst_file_manager != nullptr) { + delete sst_file_manager; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(jnienv, s); + } + auto* sptr_sst_file_manager = + new std::shared_ptr(sst_file_manager); + + return GET_CPLUSPLUS_POINTER(sptr_sst_file_manager); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: setMaxAllowedSpaceUsage + * Signature: (JJ)V + */ +void Java_org_rocksdb_SstFileManager_setMaxAllowedSpaceUsage( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jmax_allowed_space) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + sptr_sst_file_manager->get()->SetMaxAllowedSpaceUsage(jmax_allowed_space); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: setCompactionBufferSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_SstFileManager_setCompactionBufferSize( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jcompaction_buffer_size) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + sptr_sst_file_manager->get()->SetCompactionBufferSize( + jcompaction_buffer_size); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: isMaxAllowedSpaceReached + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_SstFileManager_isMaxAllowedSpaceReached( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + 
jhandle); + return sptr_sst_file_manager->get()->IsMaxAllowedSpaceReached(); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: isMaxAllowedSpaceReachedIncludingCompactions + * Signature: (J)Z + */ +jboolean +Java_org_rocksdb_SstFileManager_isMaxAllowedSpaceReachedIncludingCompactions( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + return sptr_sst_file_manager->get() + ->IsMaxAllowedSpaceReachedIncludingCompactions(); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: getTotalSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_SstFileManager_getTotalSize(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + return sptr_sst_file_manager->get()->GetTotalSize(); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: getTrackedFiles + * Signature: (J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_SstFileManager_getTrackedFiles(JNIEnv* env, + jobject /*jobj*/, + jlong jhandle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + auto tracked_files = sptr_sst_file_manager->get()->GetTrackedFiles(); + + // TODO(AR) could refactor to share code with + // ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, tracked_files); + + const jobject jtracked_files = ROCKSDB_NAMESPACE::HashMapJni::construct( + env, static_cast(tracked_files.size())); + if (jtracked_files == nullptr) { + // exception occurred + return nullptr; + } + + const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV + fn_map_kv = + [env](const std::pair& pair) { + const jstring jtracked_file_path = + env->NewStringUTF(pair.first.c_str()); + if (jtracked_file_path == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + const jobject jtracked_file_size = + ROCKSDB_NAMESPACE::LongJni::valueOf(env, pair.second); + if (jtracked_file_size == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + return std::unique_ptr>( + new std::pair(jtracked_file_path, + jtracked_file_size)); + }; + + if (!ROCKSDB_NAMESPACE::HashMapJni::putAll(env, jtracked_files, + tracked_files.begin(), + tracked_files.end(), fn_map_kv)) { + // exception occcurred + return nullptr; + } + + return jtracked_files; +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: getDeleteRateBytesPerSecond + * Signature: (J)J + */ +jlong Java_org_rocksdb_SstFileManager_getDeleteRateBytesPerSecond( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + return sptr_sst_file_manager->get()->GetDeleteRateBytesPerSecond(); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: setDeleteRateBytesPerSecond + * Signature: (JJ)V + */ +void Java_org_rocksdb_SstFileManager_setDeleteRateBytesPerSecond( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jdelete_rate) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + sptr_sst_file_manager->get()->SetDeleteRateBytesPerSecond(jdelete_rate); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: getMaxTrashDBRatio + * Signature: (J)D + */ +jdouble Java_org_rocksdb_SstFileManager_getMaxTrashDBRatio(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + return sptr_sst_file_manager->get()->GetMaxTrashDBRatio(); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: setMaxTrashDBRatio + * Signature: (JD)V + */ +void 
Java_org_rocksdb_SstFileManager_setMaxTrashDBRatio(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jdouble jratio) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + sptr_sst_file_manager->get()->SetMaxTrashDBRatio(jratio); +} + +/* + * Class: org_rocksdb_SstFileManager + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileManager_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* sptr_sst_file_manager = + reinterpret_cast*>( + jhandle); + delete sptr_sst_file_manager; +} diff --git a/src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc b/src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc new file mode 100644 index 000000000..68fa4c37c --- /dev/null +++ b/src/rocksdb/java/rocksjni/sst_file_reader_iterator.cc @@ -0,0 +1,373 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::Iterator methods from Java side. + +#include +#include +#include + +#include "include/org_rocksdb_SstFileReaderIterator.h" +#include "rocksdb/iterator.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReaderIterator_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + assert(it != nullptr); + delete it; +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: isValid0 + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_SstFileReaderIterator_isValid0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast(handle)->Valid(); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: seekToFirst0 + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReaderIterator_seekToFirst0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->SeekToFirst(); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: seekToLast0 + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReaderIterator_seekToLast0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->SeekToLast(); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: next0 + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReaderIterator_next0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->Next(); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: prev0 + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReaderIterator_prev0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->Prev(); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: seek0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_SstFileReaderIterator_seek0(JNIEnv* env, jobject /*jobj*/, + jlong handle, + jbyteArray jtarget, + jint jtarget_len) { + jbyte* target = env->GetByteArrayElements(jtarget, nullptr); + if (target == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast(target), + jtarget_len); + + auto* it = reinterpret_cast(handle); + it->Seek(target_slice); + + env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT); +} + +/* + * Class: 
org_rocksdb_SstFileReaderIterator + * Method: seekForPrev0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_SstFileReaderIterator_seekForPrev0(JNIEnv* env, + jobject /*jobj*/, + jlong handle, + jbyteArray jtarget, + jint jtarget_len) { + jbyte* target = env->GetByteArrayElements(jtarget, nullptr); + if (target == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast(target), + jtarget_len); + + auto* it = reinterpret_cast(handle); + it->SeekForPrev(target_slice); + + env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: status0 + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReaderIterator_status0(JNIEnv* env, + jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Status s = it->status(); + + if (s.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: key0 + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_SstFileReaderIterator_key0(JNIEnv* env, + jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice key_slice = it->key(); + + jbyteArray jkey = env->NewByteArray(static_cast(key_slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(key_slice.size()), + const_cast(reinterpret_cast(key_slice.data()))); + return jkey; +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: value0 + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_SstFileReaderIterator_value0(JNIEnv* env, + jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice value_slice = it->value(); + + jbyteArray jkeyValue = + env->NewByteArray(static_cast(value_slice.size())); + if (jkeyValue == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkeyValue, 0, static_cast(value_slice.size()), + const_cast(reinterpret_cast(value_slice.data()))); + return jkeyValue; +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: keyDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)I + */ +jint Java_org_rocksdb_SstFileReaderIterator_keyDirect0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice key_slice = it->key(); + return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, key_slice, jtarget, + jtarget_off, jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. 
+ * + * Class: org_rocksdb_SstFileReaderIterator + * Method: keyByteArray0 + * Signature: (J[BII)I + */ +jint Java_org_rocksdb_SstFileReaderIterator_keyByteArray0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jkey, jint jkey_off, + jint jkey_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice key_slice = it->key(); + auto slice_size = key_slice.size(); + jsize copy_size = std::min(static_cast(slice_size), + static_cast(jkey_len)); + env->SetByteArrayRegion( + jkey, jkey_off, copy_size, + const_cast(reinterpret_cast(key_slice.data()))); + + return static_cast(slice_size); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: valueDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)I + */ +jint Java_org_rocksdb_SstFileReaderIterator_valueDirect0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice value_slice = it->value(); + return ROCKSDB_NAMESPACE::JniUtil::copyToDirect(env, value_slice, jtarget, + jtarget_off, jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. + * + * Class: org_rocksdb_SstFileReaderIterator + * Method: valueByteArray0 + * Signature: (J[BII)I + */ +jint Java_org_rocksdb_SstFileReaderIterator_valueByteArray0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jvalue_target, + jint jvalue_off, jint jvalue_len) { + auto* it = reinterpret_cast(handle); + ROCKSDB_NAMESPACE::Slice value_slice = it->value(); + auto slice_size = value_slice.size(); + jsize copy_size = std::min(static_cast(slice_size), + static_cast(jvalue_len)); + env->SetByteArrayRegion( + jvalue_target, jvalue_off, copy_size, + const_cast(reinterpret_cast(value_slice.data()))); + + return static_cast(slice_size); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: seekDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)V + */ +void Java_org_rocksdb_SstFileReaderIterator_seekDirect0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->Seek(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seek, env, jtarget, jtarget_off, + jtarget_len); +} + +/* + * Class: org_rocksdb_SstFileReaderIterator + * Method: seekForPrevDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)V + */ +void Java_org_rocksdb_SstFileReaderIterator_seekForPrevDirect0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seekPrev = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->SeekForPrev(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seekPrev, env, jtarget, jtarget_off, + jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. 
+ *
+ * Class:     org_rocksdb_SstFileReaderIterator
+ * Method:    seekByteArray0
+ * Signature: (J[BII)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seekByteArray0(
+    JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jtarget,
+    jint jtarget_off, jint jtarget_len) {
+  const std::unique_ptr<char[]> target(new char[jtarget_len]);
+  if (target == nullptr) {
+    jclass oom_class = env->FindClass("java/lang/OutOfMemoryError");
+    env->ThrowNew(oom_class,
+                  "Memory allocation failed in RocksDB JNI function");
+    return;
+  }
+  env->GetByteArrayRegion(jtarget, jtarget_off, jtarget_len,
+                          reinterpret_cast<jbyte*>(target.get()));
+
+  ROCKSDB_NAMESPACE::Slice target_slice(target.get(), jtarget_len);
+
+  auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+  it->Seek(target_slice);
+}
+
+/*
+ * This method supports fetching into indirect byte buffers;
+ * the Java wrapper extracts the byte[] and passes it here.
+ *
+ * Class:     org_rocksdb_SstFileReaderIterator
+ * Method:    seekForPrevByteArray0
+ * Signature: (J[BII)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_seekForPrevByteArray0(
+    JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jtarget,
+    jint jtarget_off, jint jtarget_len) {
+  const std::unique_ptr<char[]> target(new char[jtarget_len]);
+  if (target == nullptr) {
+    jclass oom_class = env->FindClass("java/lang/OutOfMemoryError");
+    env->ThrowNew(oom_class,
+                  "Memory allocation failed in RocksDB JNI function");
+    return;
+  }
+  env->GetByteArrayRegion(jtarget, jtarget_off, jtarget_len,
+                          reinterpret_cast<jbyte*>(target.get()));
+
+  ROCKSDB_NAMESPACE::Slice target_slice(target.get(), jtarget_len);
+
+  auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+  it->SeekForPrev(target_slice);
+}
+
+/*
+ * Class:     org_rocksdb_SstFileReaderIterator
+ * Method:    refresh0
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileReaderIterator_refresh0(JNIEnv* env,
+                                                     jobject /*jobj*/,
+                                                     jlong handle) {
+  auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(handle);
+  ROCKSDB_NAMESPACE::Status s = it->Refresh();
+
+  if (s.ok()) {
+    return;
+  }
+
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
diff --git a/src/rocksdb/java/rocksjni/sst_file_readerjni.cc b/src/rocksdb/java/rocksjni/sst_file_readerjni.cc
new file mode 100644
index 000000000..7ef711842
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/sst_file_readerjni.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::SstFileReader methods
+// from Java side.
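
As context for the bridge below, the underlying reader API in one piece (a minimal C++ usage sketch, not part of the patch; the file path is hypothetical and error handling is abbreviated):

#include <memory>

#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/sst_file_reader.h"

// Sketch: open an SST file, verify it, and scan it with an iterator.
void ReadSstExample() {
  ROCKSDB_NAMESPACE::Options options;
  ROCKSDB_NAMESPACE::SstFileReader reader(options);
  if (!reader.Open("/tmp/example.sst").ok()) {
    return;
  }
  if (!reader.VerifyChecksum().ok()) {
    return;
  }
  std::unique_ptr<ROCKSDB_NAMESPACE::Iterator> it(
      reader.NewIterator(ROCKSDB_NAMESPACE::ReadOptions()));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // it->key() / it->value() are Slices valid only until the next move.
  }
}
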
+ +#include + +#include + +#include "include/org_rocksdb_SstFileReader.h" +#include "rocksdb/comparator.h" +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "rocksdb/sst_file_reader.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_SstFileReader + * Method: newSstFileReader + * Signature: (J)J + */ +jlong Java_org_rocksdb_SstFileReader_newSstFileReader(JNIEnv * /*env*/, + jclass /*jcls*/, + jlong joptions) { + auto *options = + reinterpret_cast(joptions); + ROCKSDB_NAMESPACE::SstFileReader *sst_file_reader = + new ROCKSDB_NAMESPACE::SstFileReader(*options); + return GET_CPLUSPLUS_POINTER(sst_file_reader); +} + +/* + * Class: org_rocksdb_SstFileReader + * Method: open + * Signature: (JLjava/lang/String;)V + */ +void Java_org_rocksdb_SstFileReader_open(JNIEnv *env, jobject /*jobj*/, + jlong jhandle, jstring jfile_path) { + const char *file_path = env->GetStringUTFChars(jfile_path, nullptr); + if (file_path == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + ROCKSDB_NAMESPACE::Status s = + reinterpret_cast(jhandle)->Open( + file_path); + env->ReleaseStringUTFChars(jfile_path, file_path); + + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_SstFileReader + * Method: newIterator + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_SstFileReader_newIterator(JNIEnv * /*env*/, + jobject /*jobj*/, + jlong jhandle, + jlong jread_options_handle) { + auto *sst_file_reader = + reinterpret_cast(jhandle); + auto *read_options = + reinterpret_cast(jread_options_handle); + return GET_CPLUSPLUS_POINTER(sst_file_reader->NewIterator(*read_options)); +} + +/* + * Class: org_rocksdb_SstFileReader + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReader_disposeInternal(JNIEnv * /*env*/, + jobject /*jobj*/, + jlong jhandle) { + delete reinterpret_cast(jhandle); +} + +/* + * Class: org_rocksdb_SstFileReader + * Method: verifyChecksum + * Signature: (J)V + */ +void Java_org_rocksdb_SstFileReader_verifyChecksum(JNIEnv *env, + jobject /*jobj*/, + jlong jhandle) { + auto *sst_file_reader = + reinterpret_cast(jhandle); + auto s = sst_file_reader->VerifyChecksum(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_SstFileReader + * Method: getTableProperties + * Signature: (J)J + */ +jobject Java_org_rocksdb_SstFileReader_getTableProperties(JNIEnv *env, + jobject /*jobj*/, + jlong jhandle) { + auto *sst_file_reader = + reinterpret_cast(jhandle); + std::shared_ptr tp = + sst_file_reader->GetTableProperties(); + jobject jtable_properties = + ROCKSDB_NAMESPACE::TablePropertiesJni::fromCppTableProperties( + env, *(tp.get())); + return jtable_properties; +} diff --git a/src/rocksdb/java/rocksjni/sst_file_writerjni.cc b/src/rocksdb/java/rocksjni/sst_file_writerjni.cc new file mode 100644 index 000000000..1898c3cfc --- /dev/null +++ b/src/rocksdb/java/rocksjni/sst_file_writerjni.cc @@ -0,0 +1,310 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling C++ ROCKSDB_NAMESPACE::SstFileWriter methods +// from Java side. 
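
Likewise for the writer bridge below, the underlying API end to end (a minimal C++ sketch, not part of the patch; keys must be inserted in ascending comparator order, and the path is hypothetical):

#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "rocksdb/sst_file_writer.h"

// Sketch: build an SST file that can later be ingested into a DB.
ROCKSDB_NAMESPACE::Status WriteSstExample() {
  const ROCKSDB_NAMESPACE::Options options;
  ROCKSDB_NAMESPACE::SstFileWriter writer(ROCKSDB_NAMESPACE::EnvOptions(),
                                          options);
  ROCKSDB_NAMESPACE::Status s = writer.Open("/tmp/example.sst");
  if (!s.ok()) return s;
  s = writer.Put("key1", "value1");  // ascending key order is required
  if (!s.ok()) return s;
  s = writer.Put("key2", "value2");
  if (!s.ok()) return s;
  return writer.Finish();  // seals the file; required before ingestion
}
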
+
+#include <jni.h>
+
+#include <string>
+
+#include "include/org_rocksdb_SstFileWriter.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/env.h"
+#include "rocksdb/options.h"
+#include "rocksdb/sst_file_writer.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: newSstFileWriter
+ * Signature: (JJJB)J
+ */
+jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter__JJJB(
+    JNIEnv * /*env*/, jclass /*jcls*/, jlong jenvoptions, jlong joptions,
+    jlong jcomparator_handle, jbyte jcomparator_type) {
+  ROCKSDB_NAMESPACE::Comparator *comparator = nullptr;
+  switch (jcomparator_type) {
+    // JAVA_COMPARATOR
+    case 0x0:
+      comparator =
+          reinterpret_cast<ROCKSDB_NAMESPACE::ComparatorJniCallback *>(
+              jcomparator_handle);
+      break;
+
+    // JAVA_NATIVE_COMPARATOR_WRAPPER
+    case 0x1:
+      comparator = reinterpret_cast<ROCKSDB_NAMESPACE::Comparator *>(
+          jcomparator_handle);
+      break;
+  }
+  auto *env_options =
+      reinterpret_cast<const ROCKSDB_NAMESPACE::EnvOptions *>(jenvoptions);
+  auto *options =
+      reinterpret_cast<const ROCKSDB_NAMESPACE::Options *>(joptions);
+  ROCKSDB_NAMESPACE::SstFileWriter *sst_file_writer =
+      new ROCKSDB_NAMESPACE::SstFileWriter(*env_options, *options, comparator);
+  return GET_CPLUSPLUS_POINTER(sst_file_writer);
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: newSstFileWriter
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_SstFileWriter_newSstFileWriter__JJ(JNIEnv * /*env*/,
+                                                          jclass /*jcls*/,
+                                                          jlong jenvoptions,
+                                                          jlong joptions) {
+  auto *env_options =
+      reinterpret_cast<const ROCKSDB_NAMESPACE::EnvOptions *>(jenvoptions);
+  auto *options =
+      reinterpret_cast<const ROCKSDB_NAMESPACE::Options *>(joptions);
+  ROCKSDB_NAMESPACE::SstFileWriter *sst_file_writer =
+      new ROCKSDB_NAMESPACE::SstFileWriter(*env_options, *options);
+  return GET_CPLUSPLUS_POINTER(sst_file_writer);
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: open
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_SstFileWriter_open(JNIEnv *env, jobject /*jobj*/,
+                                         jlong jhandle, jstring jfile_path) {
+  const char *file_path = env->GetStringUTFChars(jfile_path, nullptr);
+  if (file_path == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Open(
+          file_path);
+  env->ReleaseStringUTFChars(jfile_path, file_path);
+
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: put
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_SstFileWriter_put__JJJ(JNIEnv *env, jobject /*jobj*/,
+                                             jlong jhandle, jlong jkey_handle,
+                                             jlong jvalue_handle) {
+  auto *key_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jkey_handle);
+  auto *value_slice =
+      reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jvalue_handle);
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Put(
+          *key_slice, *value_slice);
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: put
+ * Signature: (J[B[B)V
+ */
+void Java_org_rocksdb_SstFileWriter_put__J_3B_3B(JNIEnv *env, jobject /*jobj*/,
+                                                 jlong jhandle, jbyteArray jkey,
+                                                 jbyteArray jval) {
+  jbyte *key = env->GetByteArrayElements(jkey, nullptr);
+  if (key == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+  ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char *>(key),
+                                     env->GetArrayLength(jkey));
+
+  jbyte *value = env->GetByteArrayElements(jval, nullptr);
+  if (value == nullptr) {
+    // exception thrown: OutOfMemoryError
+    env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+    return;
+  }
+  ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char *>(value),
+                                       env->GetArrayLength(jval));
+
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Put(
+          key_slice, value_slice);
+
+  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+  env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
+
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: putDirect
+ * Signature: (JLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;II)V
+ */
+void Java_org_rocksdb_SstFileWriter_putDirect(JNIEnv *env, jobject /*jdb*/,
+                                              jlong jdb_handle, jobject jkey,
+                                              jint jkey_off, jint jkey_len,
+                                              jobject jval, jint jval_off,
+                                              jint jval_len) {
+  auto *writer =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jdb_handle);
+  auto put = [&env, &writer](ROCKSDB_NAMESPACE::Slice &key,
+                             ROCKSDB_NAMESPACE::Slice &value) {
+    ROCKSDB_NAMESPACE::Status s = writer->Put(key, value);
+    if (s.ok()) {
+      return;
+    }
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  };
+  ROCKSDB_NAMESPACE::JniUtil::kv_op_direct(put, env, jkey, jkey_off, jkey_len,
+                                           jval, jval_off, jval_len);
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: fileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SstFileWriter_fileSize(JNIEnv * /*env*/,
+                                              jobject /*jdb*/,
+                                              jlong jdb_handle) {
+  auto *writer =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jdb_handle);
+  return static_cast<jlong>(writer->FileSize());
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: merge
+ * Signature: (JJJ)V
+ */
+void Java_org_rocksdb_SstFileWriter_merge__JJJ(JNIEnv *env, jobject /*jobj*/,
+                                               jlong jhandle,
+                                               jlong jkey_handle,
+                                               jlong jvalue_handle) {
+  auto *key_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jkey_handle);
+  auto *value_slice =
+      reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jvalue_handle);
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Merge(
+          *key_slice, *value_slice);
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: merge
+ * Signature: (J[B[B)V
+ */
+void Java_org_rocksdb_SstFileWriter_merge__J_3B_3B(JNIEnv *env,
+                                                   jobject /*jobj*/,
+                                                   jlong jhandle,
+                                                   jbyteArray jkey,
+                                                   jbyteArray jval) {
+  jbyte *key = env->GetByteArrayElements(jkey, nullptr);
+  if (key == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+  ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char *>(key),
+                                     env->GetArrayLength(jkey));
+
+  jbyte *value = env->GetByteArrayElements(jval, nullptr);
+  if (value == nullptr) {
+    // exception thrown: OutOfMemoryError
+    env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+    return;
+  }
+  ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast<char *>(value),
+                                       env->GetArrayLength(jval));
+
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Merge(
+          key_slice, value_slice);
+
+  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+  env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
+
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: delete
+ * Signature: (J[B)V
+ */
+void Java_org_rocksdb_SstFileWriter_delete__J_3B(JNIEnv *env, jobject /*jobj*/,
+                                                 jlong jhandle,
+                                                 jbyteArray jkey) {
+  jbyte *key = env->GetByteArrayElements(jkey, nullptr);
+  if (key == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return;
+  }
+  ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char *>(key),
+                                     env->GetArrayLength(jkey));
+
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Delete(
+          key_slice);
+
+  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: delete
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_SstFileWriter_delete__JJ(JNIEnv *env, jobject /*jobj*/,
+                                               jlong jhandle,
+                                               jlong jkey_handle) {
+  auto *key_slice = reinterpret_cast<ROCKSDB_NAMESPACE::Slice *>(jkey_handle);
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Delete(
+          *key_slice);
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: finish
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileWriter_finish(JNIEnv *env, jobject /*jobj*/,
+                                           jlong jhandle) {
+  ROCKSDB_NAMESPACE::Status s =
+      reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle)->Finish();
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_SstFileWriter
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstFileWriter_disposeInternal(JNIEnv * /*env*/,
+                                                    jobject /*jobj*/,
+                                                    jlong jhandle) {
+  delete reinterpret_cast<ROCKSDB_NAMESPACE::SstFileWriter *>(jhandle);
+}
diff --git a/src/rocksdb/java/rocksjni/sst_partitioner.cc b/src/rocksdb/java/rocksjni/sst_partitioner.cc
new file mode 100644
index 000000000..1cea3b0cb
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/sst_partitioner.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ ROCKSDB_NAMESPACE::SstPartitionerFixedPrefixFactory methods
+// from Java side.
+
+#include "rocksdb/sst_partitioner.h"
+
+#include <jni.h>
+
+#include <memory>
+
+#include "include/org_rocksdb_SstPartitionerFixedPrefixFactory.h"
+#include "rocksdb/sst_file_manager.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_SstPartitionerFixedPrefixFactory
+ * Method: newSstPartitionerFixedPrefixFactory0
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SstPartitionerFixedPrefixFactory_newSstPartitionerFixedPrefixFactory0(
+    JNIEnv*, jclass, jlong prefix_len) {
+  auto* ptr = new std::shared_ptr<ROCKSDB_NAMESPACE::SstPartitionerFactory>(
+      ROCKSDB_NAMESPACE::NewSstPartitionerFixedPrefixFactory(prefix_len));
+  return GET_CPLUSPLUS_POINTER(ptr);
+}
+
+/*
+ * Class: org_rocksdb_SstPartitionerFixedPrefixFactory
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_SstPartitionerFixedPrefixFactory_disposeInternal(
+    JNIEnv*, jobject, jlong jhandle) {
+  auto* ptr = reinterpret_cast<
+      std::shared_ptr<ROCKSDB_NAMESPACE::SstPartitionerFactory>*>(jhandle);
+  delete ptr;  // delete std::shared_ptr
+}
diff --git a/src/rocksdb/java/rocksjni/statistics.cc b/src/rocksdb/java/rocksjni/statistics.cc
new file mode 100644
index 000000000..bd405afa1
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/statistics.cc
@@ -0,0 +1,268 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::Statistics methods from Java side.
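Taken together, the writer bridge above corresponds to this C++ sequence (a sketch; the path and keys are placeholders, and keys must be added in comparator order or Put returns a non-OK status):

    #include "rocksdb/env.h"
    #include "rocksdb/options.h"
    #include "rocksdb/sst_file_writer.h"

    ROCKSDB_NAMESPACE::Status WriteExampleSst() {
      ROCKSDB_NAMESPACE::Options options;
      ROCKSDB_NAMESPACE::SstFileWriter writer(ROCKSDB_NAMESPACE::EnvOptions(),
                                              options);
      // "/tmp/example.sst" is a placeholder path.
      ROCKSDB_NAMESPACE::Status s = writer.Open("/tmp/example.sst");
      if (!s.ok()) return s;
      // Keys must arrive in increasing comparator order.
      s = writer.Put("key1", "value1");
      if (!s.ok()) return s;
      s = writer.Put("key2", "value2");
      if (!s.ok()) return s;
      return writer.Finish();  // seals the file; FileSize() is now final
    }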
+
+#include "rocksdb/statistics.h"
+
+#include <jni.h>
+
+#include <memory>
+#include <set>
+
+#include "include/org_rocksdb_Statistics.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+#include "rocksjni/statisticsjni.h"
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics__(JNIEnv* env, jclass jcls) {
+  return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, nullptr, 0);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics__J(
+    JNIEnv* env, jclass jcls, jlong jother_statistics_handle) {
+  return Java_org_rocksdb_Statistics_newStatistics___3BJ(
+      env, jcls, nullptr, jother_statistics_handle);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: ([B)J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics___3B(JNIEnv* env, jclass jcls,
+                                                     jbyteArray jhistograms) {
+  return Java_org_rocksdb_Statistics_newStatistics___3BJ(env, jcls, jhistograms,
+                                                         0);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: newStatistics
+ * Signature: ([BJ)J
+ */
+jlong Java_org_rocksdb_Statistics_newStatistics___3BJ(
+    JNIEnv* env, jclass, jbyteArray jhistograms,
+    jlong jother_statistics_handle) {
+  std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>* pSptr_other_statistics =
+      nullptr;
+  if (jother_statistics_handle > 0) {
+    pSptr_other_statistics =
+        reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+            jother_statistics_handle);
+  }
+
+  std::set<uint32_t> histograms;
+  if (jhistograms != nullptr) {
+    const jsize len = env->GetArrayLength(jhistograms);
+    if (len > 0) {
+      jbyte* jhistogram = env->GetByteArrayElements(jhistograms, nullptr);
+      if (jhistogram == nullptr) {
+        // exception thrown: OutOfMemoryError
+        return 0;
+      }
+
+      for (jsize i = 0; i < len; i++) {
+        const ROCKSDB_NAMESPACE::Histograms histogram =
+            ROCKSDB_NAMESPACE::HistogramTypeJni::toCppHistograms(jhistogram[i]);
+        histograms.emplace(histogram);
+      }
+
+      env->ReleaseByteArrayElements(jhistograms, jhistogram, JNI_ABORT);
+    }
+  }
+
+  std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> sptr_other_statistics =
+      nullptr;
+  if (pSptr_other_statistics != nullptr) {
+    sptr_other_statistics = *pSptr_other_statistics;
+  }
+
+  auto* pSptr_statistics =
+      new std::shared_ptr<ROCKSDB_NAMESPACE::StatisticsJni>(
+          new ROCKSDB_NAMESPACE::StatisticsJni(sptr_other_statistics,
+                                               histograms));
+
+  return GET_CPLUSPLUS_POINTER(pSptr_statistics);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Statistics_disposeInternal(JNIEnv*, jobject,
+                                                 jlong jhandle) {
+  if (jhandle > 0) {
+    auto* pSptr_statistics =
+        reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+            jhandle);
+    delete pSptr_statistics;
+  }
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: statsLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Statistics_statsLevel(JNIEnv*, jobject, jlong jhandle) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+  return ROCKSDB_NAMESPACE::StatsLevelJni::toJavaStatsLevel(
+      pSptr_statistics->get()->get_stats_level());
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: setStatsLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Statistics_setStatsLevel(JNIEnv*, jobject, jlong jhandle,
+                                               jbyte jstats_level) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+  auto stats_level =
+      ROCKSDB_NAMESPACE::StatsLevelJni::toCppStatsLevel(jstats_level);
+  pSptr_statistics->get()->set_stats_level(stats_level);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getTickerCount
+ * Signature: (JB)J
+ */
+jlong Java_org_rocksdb_Statistics_getTickerCount(JNIEnv*, jobject,
+                                                 jlong jhandle,
+                                                 jbyte jticker_type) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+  auto ticker = ROCKSDB_NAMESPACE::TickerTypeJni::toCppTickers(jticker_type);
+  uint64_t count = pSptr_statistics->get()->getTickerCount(ticker);
+  return static_cast<jlong>(count);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getAndResetTickerCount
+ * Signature: (JB)J
+ */
+jlong Java_org_rocksdb_Statistics_getAndResetTickerCount(JNIEnv*, jobject,
+                                                         jlong jhandle,
+                                                         jbyte jticker_type) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+  auto ticker = ROCKSDB_NAMESPACE::TickerTypeJni::toCppTickers(jticker_type);
+  return pSptr_statistics->get()->getAndResetTickerCount(ticker);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getHistogramData
+ * Signature: (JB)Lorg/rocksdb/HistogramData;
+ */
+jobject Java_org_rocksdb_Statistics_getHistogramData(JNIEnv* env, jobject,
+                                                     jlong jhandle,
+                                                     jbyte jhistogram_type) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+
+  // TODO(AR) perhaps better to construct a Java Object Wrapper that
+  // uses ptr to C++ `new HistogramData`
+  ROCKSDB_NAMESPACE::HistogramData data;
+
+  auto histogram =
+      ROCKSDB_NAMESPACE::HistogramTypeJni::toCppHistograms(jhistogram_type);
+  pSptr_statistics->get()->histogramData(
+      static_cast<ROCKSDB_NAMESPACE::Histograms>(histogram), &data);
+
+  jclass jclazz = ROCKSDB_NAMESPACE::HistogramDataJni::getJClass(env);
+  if (jclazz == nullptr) {
+    // exception occurred accessing class
+    return nullptr;
+  }
+
+  jmethodID mid =
+      ROCKSDB_NAMESPACE::HistogramDataJni::getConstructorMethodId(env);
+  if (mid == nullptr) {
+    // exception occurred accessing method
+    return nullptr;
+  }
+
+  return env->NewObject(jclazz, mid, data.median, data.percentile95,
+                        data.percentile99, data.average,
+                        data.standard_deviation, data.max, data.count, data.sum,
+                        data.min);
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getHistogramString
+ * Signature: (JB)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Statistics_getHistogramString(JNIEnv* env, jobject,
+                                                       jlong jhandle,
+                                                       jbyte jhistogram_type) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+  auto histogram =
+      ROCKSDB_NAMESPACE::HistogramTypeJni::toCppHistograms(jhistogram_type);
+  auto str = pSptr_statistics->get()->getHistogramString(histogram);
+  return env->NewStringUTF(str.c_str());
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: reset
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Statistics_reset(JNIEnv* env, jobject, jlong jhandle) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+  ROCKSDB_NAMESPACE::Status s = pSptr_statistics->get()->Reset();
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: toString
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_Statistics_toString(JNIEnv* env, jobject,
+                                             jlong jhandle) {
+  auto* pSptr_statistics =
+      reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Statistics>*>(
+          jhandle);
+  assert(pSptr_statistics != nullptr);
+  auto str = pSptr_statistics->get()->ToString();
+  return env->NewStringUTF(str.c_str());
+}
diff --git a/src/rocksdb/java/rocksjni/statisticsjni.cc b/src/rocksdb/java/rocksjni/statisticsjni.cc
new file mode 100644
index 000000000..f46337893
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/statisticsjni.cc
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Statistics
+
+#include "rocksjni/statisticsjni.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats)
+    : StatisticsImpl(stats), m_ignore_histograms() {}
+
+StatisticsJni::StatisticsJni(std::shared_ptr<Statistics> stats,
+                             const std::set<uint32_t> ignore_histograms)
+    : StatisticsImpl(stats), m_ignore_histograms(ignore_histograms) {}
+
+bool StatisticsJni::HistEnabledForType(uint32_t type) const {
+  if (type >= HISTOGRAM_ENUM_MAX) {
+    return false;
+  }
+
+  if (m_ignore_histograms.count(type) > 0) {
+    return false;
+  }
+
+  return true;
+}
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/statisticsjni.h b/src/rocksdb/java/rocksjni/statisticsjni.h
new file mode 100644
index 000000000..ce823f9b1
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/statisticsjni.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Statistics
+
+#ifndef JAVA_ROCKSJNI_STATISTICSJNI_H_
+#define JAVA_ROCKSJNI_STATISTICSJNI_H_
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include "monitoring/statistics.h"
+#include "rocksdb/statistics.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class StatisticsJni : public StatisticsImpl {
+ public:
+  StatisticsJni(std::shared_ptr<Statistics> stats);
+  StatisticsJni(std::shared_ptr<Statistics> stats,
+                const std::set<uint32_t> ignore_histograms);
+  virtual bool HistEnabledForType(uint32_t type) const override;
+
+ private:
+  const std::set<uint32_t> m_ignore_histograms;
+};
+
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif  // JAVA_ROCKSJNI_STATISTICSJNI_H_
diff --git a/src/rocksdb/java/rocksjni/table.cc b/src/rocksdb/java/rocksjni/table.cc
new file mode 100644
index 000000000..0054e5c1f
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table.cc
@@ -0,0 +1,161 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::Options.
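As a usage sketch of the Statistics object the bridges above manage (the C++ calls are the standard public API; the workload wiring is illustrative only):

    #include <iostream>
    #include <memory>

    #include "rocksdb/statistics.h"

    void PrintGetStats() {
      std::shared_ptr<ROCKSDB_NAMESPACE::Statistics> stats =
          ROCKSDB_NAMESPACE::CreateDBStatistics();
      stats->set_stats_level(ROCKSDB_NAMESPACE::StatsLevel::kExceptTimeForMutex);
      // ... attach `stats` to DBOptions::statistics and run a workload ...
      uint64_t keys_read =
          stats->getTickerCount(ROCKSDB_NAMESPACE::NUMBER_KEYS_READ);
      ROCKSDB_NAMESPACE::HistogramData hist;
      stats->histogramData(ROCKSDB_NAMESPACE::DB_GET, &hist);
      std::cout << keys_read << " keys read, median get latency "
                << hist.median << std::endl;
    }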
+
+#include "rocksdb/table.h"
+
+#include <jni.h>
+
+#include "include/org_rocksdb_BlockBasedTableConfig.h"
+#include "include/org_rocksdb_PlainTableConfig.h"
+#include "portal.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+
+/*
+ * Class: org_rocksdb_PlainTableConfig
+ * Method: newTableFactoryHandle
+ * Signature: (IIDIIBZZ)J
+ */
+jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle(
+    JNIEnv * /*env*/, jobject /*jobj*/, jint jkey_size,
+    jint jbloom_bits_per_key, jdouble jhash_table_ratio, jint jindex_sparseness,
+    jint jhuge_page_tlb_size, jbyte jencoding_type, jboolean jfull_scan_mode,
+    jboolean jstore_index_in_file) {
+  ROCKSDB_NAMESPACE::PlainTableOptions options =
+      ROCKSDB_NAMESPACE::PlainTableOptions();
+  options.user_key_len = jkey_size;
+  options.bloom_bits_per_key = jbloom_bits_per_key;
+  options.hash_table_ratio = jhash_table_ratio;
+  options.index_sparseness = jindex_sparseness;
+  options.huge_page_tlb_size = jhuge_page_tlb_size;
+  options.encoding_type =
+      static_cast<ROCKSDB_NAMESPACE::EncodingType>(jencoding_type);
+  options.full_scan_mode = jfull_scan_mode;
+  options.store_index_in_file = jstore_index_in_file;
+  return GET_CPLUSPLUS_POINTER(
+      ROCKSDB_NAMESPACE::NewPlainTableFactory(options));
+}
+
+/*
+ * Class: org_rocksdb_BlockBasedTableConfig
+ * Method: newTableFactoryHandle
+ * Signature: (ZZZZBBDBZJJJJIIIJZZZJZZIIZZBJIJI)J
+ */
+jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle(
+    JNIEnv *, jobject, jboolean jcache_index_and_filter_blocks,
+    jboolean jcache_index_and_filter_blocks_with_high_priority,
+    jboolean jpin_l0_filter_and_index_blocks_in_cache,
+    jboolean jpin_top_level_index_and_filter, jbyte jindex_type_value,
+    jbyte jdata_block_index_type_value,
+    jdouble jdata_block_hash_table_util_ratio, jbyte jchecksum_type_value,
+    jboolean jno_block_cache, jlong jblock_cache_handle,
+    jlong jpersistent_cache_handle, jlong jblock_cache_compressed_handle,
+    jlong jblock_size, jint jblock_size_deviation, jint jblock_restart_interval,
+    jint jindex_block_restart_interval, jlong jmetadata_block_size,
+    jboolean jpartition_filters, jboolean joptimize_filters_for_memory,
+    jboolean juse_delta_encoding, jlong jfilter_policy_handle,
+    jboolean jwhole_key_filtering, jboolean jverify_compression,
+    jint jread_amp_bytes_per_bit, jint jformat_version,
+    jboolean jenable_index_compression, jboolean jblock_align,
+    jbyte jindex_shortening, jlong jblock_cache_size,
+    jint jblock_cache_num_shard_bits, jlong jblock_cache_compressed_size,
+    jint jblock_cache_compressed_num_shard_bits) {
+  ROCKSDB_NAMESPACE::BlockBasedTableOptions options;
+  options.cache_index_and_filter_blocks =
+      static_cast<bool>(jcache_index_and_filter_blocks);
+  options.cache_index_and_filter_blocks_with_high_priority =
+      static_cast<bool>(jcache_index_and_filter_blocks_with_high_priority);
+  options.pin_l0_filter_and_index_blocks_in_cache =
+      static_cast<bool>(jpin_l0_filter_and_index_blocks_in_cache);
+  options.pin_top_level_index_and_filter =
+      static_cast<bool>(jpin_top_level_index_and_filter);
+  options.index_type =
+      ROCKSDB_NAMESPACE::IndexTypeJni::toCppIndexType(jindex_type_value);
+  options.data_block_index_type =
+      ROCKSDB_NAMESPACE::DataBlockIndexTypeJni::toCppDataBlockIndexType(
+          jdata_block_index_type_value);
+  options.data_block_hash_table_util_ratio =
+      static_cast<double>(jdata_block_hash_table_util_ratio);
+  options.checksum = ROCKSDB_NAMESPACE::ChecksumTypeJni::toCppChecksumType(
+      jchecksum_type_value);
+  options.no_block_cache = static_cast<bool>(jno_block_cache);
+  if (options.no_block_cache) {
+    options.block_cache = nullptr;
+  } else {
+    if (jblock_cache_handle > 0) {
+      std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *pCache =
+          reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *>(
+              jblock_cache_handle);
+      options.block_cache = *pCache;
+    } else if (jblock_cache_size >= 0) {
+      if (jblock_cache_num_shard_bits > 0) {
+        options.block_cache = ROCKSDB_NAMESPACE::NewLRUCache(
+            static_cast<size_t>(jblock_cache_size),
+            static_cast<int>(jblock_cache_num_shard_bits));
+      } else {
+        options.block_cache = ROCKSDB_NAMESPACE::NewLRUCache(
+            static_cast<size_t>(jblock_cache_size));
+      }
+    } else {
+      options.no_block_cache = true;
+      options.block_cache = nullptr;
+    }
+  }
+  if (jpersistent_cache_handle > 0) {
+    std::shared_ptr<ROCKSDB_NAMESPACE::PersistentCache> *pCache =
+        reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::PersistentCache> *>(
+            jpersistent_cache_handle);
+    options.persistent_cache = *pCache;
+  }
+  if (jblock_cache_compressed_handle > 0) {
+    std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *pCache =
+        reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache> *>(
+            jblock_cache_compressed_handle);
+    options.block_cache_compressed = *pCache;
+  } else if (jblock_cache_compressed_size > 0) {
+    if (jblock_cache_compressed_num_shard_bits > 0) {
+      options.block_cache_compressed = ROCKSDB_NAMESPACE::NewLRUCache(
+          static_cast<size_t>(jblock_cache_compressed_size),
+          static_cast<int>(jblock_cache_compressed_num_shard_bits));
+    } else {
+      options.block_cache_compressed = ROCKSDB_NAMESPACE::NewLRUCache(
+          static_cast<size_t>(jblock_cache_compressed_size));
+    }
+  }
+  options.block_size = static_cast<size_t>(jblock_size);
+  options.block_size_deviation = static_cast<int>(jblock_size_deviation);
+  options.block_restart_interval = static_cast<int>(jblock_restart_interval);
+  options.index_block_restart_interval =
+      static_cast<int>(jindex_block_restart_interval);
+  options.metadata_block_size = static_cast<uint64_t>(jmetadata_block_size);
+  options.partition_filters = static_cast<bool>(jpartition_filters);
+  options.optimize_filters_for_memory =
+      static_cast<bool>(joptimize_filters_for_memory);
+  options.use_delta_encoding = static_cast<bool>(juse_delta_encoding);
+  if (jfilter_policy_handle > 0) {
+    std::shared_ptr<const ROCKSDB_NAMESPACE::FilterPolicy> *pFilterPolicy =
+        reinterpret_cast<
+            std::shared_ptr<const ROCKSDB_NAMESPACE::FilterPolicy> *>(
+            jfilter_policy_handle);
+    options.filter_policy = *pFilterPolicy;
+  }
+  options.whole_key_filtering = static_cast<bool>(jwhole_key_filtering);
+  options.verify_compression = static_cast<bool>(jverify_compression);
+  options.read_amp_bytes_per_bit =
+      static_cast<uint32_t>(jread_amp_bytes_per_bit);
+  options.format_version = static_cast<uint32_t>(jformat_version);
+  options.enable_index_compression =
+      static_cast<bool>(jenable_index_compression);
+  options.block_align = static_cast<bool>(jblock_align);
+  options.index_shortening =
+      ROCKSDB_NAMESPACE::IndexShorteningModeJni::toCppIndexShorteningMode(
+          jindex_shortening);
+
+  return GET_CPLUSPLUS_POINTER(
+      ROCKSDB_NAMESPACE::NewBlockBasedTableFactory(options));
+}
diff --git a/src/rocksdb/java/rocksjni/table_filter.cc b/src/rocksdb/java/rocksjni/table_filter.cc
new file mode 100644
index 000000000..1400fa1d9
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table_filter.cc
@@ -0,0 +1,27 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// org.rocksdb.AbstractTableFilter.
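The long JNI parameter list above is a flattened BlockBasedTableOptions; in plain C++ the equivalent configuration looks roughly like this (a sketch; the sizes are illustrative):

    #include "rocksdb/cache.h"
    #include "rocksdb/filter_policy.h"
    #include "rocksdb/options.h"
    #include "rocksdb/table.h"

    ROCKSDB_NAMESPACE::Options MakeBlockBasedOptions() {
      ROCKSDB_NAMESPACE::BlockBasedTableOptions table_options;
      table_options.block_cache =
          ROCKSDB_NAMESPACE::NewLRUCache(64 << 20);  // 64 MiB, illustrative
      table_options.filter_policy.reset(
          ROCKSDB_NAMESPACE::NewBloomFilterPolicy(10));  // bits per key
      table_options.cache_index_and_filter_blocks = true;
      table_options.block_size = 16 * 1024;

      ROCKSDB_NAMESPACE::Options options;
      options.table_factory.reset(
          ROCKSDB_NAMESPACE::NewBlockBasedTableFactory(table_options));
      return options;
    }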
+
+#include <jni.h>
+
+#include <memory>
+
+#include "include/org_rocksdb_AbstractTableFilter.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/table_filter_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTableFilter
+ * Method: createNewTableFilter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTableFilter_createNewTableFilter(
+    JNIEnv* env, jobject jtable_filter) {
+  auto* table_filter_jnicallback =
+      new ROCKSDB_NAMESPACE::TableFilterJniCallback(env, jtable_filter);
+  return GET_CPLUSPLUS_POINTER(table_filter_jnicallback);
+}
diff --git a/src/rocksdb/java/rocksjni/table_filter_jnicallback.cc b/src/rocksdb/java/rocksjni/table_filter_jnicallback.cc
new file mode 100644
index 000000000..5350c5cee
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table_filter_jnicallback.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TableFilter.
+
+#include "rocksjni/table_filter_jnicallback.h"
+
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+TableFilterJniCallback::TableFilterJniCallback(JNIEnv* env,
+                                               jobject jtable_filter)
+    : JniCallback(env, jtable_filter) {
+  m_jfilter_methodid = AbstractTableFilterJni::getFilterMethod(env);
+  if (m_jfilter_methodid == nullptr) {
+    // exception thrown: NoSuchMethodException or OutOfMemoryError
+    return;
+  }
+
+  // create the function reference
+  /*
+     Note the JNI ENV must be obtained/released
+     on each call to the function itself as
+     it may be called from multiple threads
+  */
+  m_table_filter_function =
+      [this](const ROCKSDB_NAMESPACE::TableProperties& table_properties) {
+        jboolean attached_thread = JNI_FALSE;
+        JNIEnv* thread_env = getJniEnv(&attached_thread);
+        assert(thread_env != nullptr);
+
+        // create a Java TableProperties object
+        jobject jtable_properties = TablePropertiesJni::fromCppTableProperties(
+            thread_env, table_properties);
+        if (jtable_properties == nullptr) {
+          // exception thrown from fromCppTableProperties
+          thread_env->ExceptionDescribe();  // print out exception to stderr
+          releaseJniEnv(attached_thread);
+          return false;
+        }
+
+        jboolean result = thread_env->CallBooleanMethod(
+            m_jcallback_obj, m_jfilter_methodid, jtable_properties);
+        if (thread_env->ExceptionCheck()) {
+          // exception thrown from CallBooleanMethod
+          thread_env->DeleteLocalRef(jtable_properties);
+          thread_env->ExceptionDescribe();  // print out exception to stderr
+          releaseJniEnv(attached_thread);
+          return false;
+        }
+
+        // ok... cleanup and then return
+        releaseJniEnv(attached_thread);
+        return static_cast<bool>(result);
+      };
+}
+
+std::function<bool(const ROCKSDB_NAMESPACE::TableProperties&)>
+TableFilterJniCallback::GetTableFilterFunction() {
+  return m_table_filter_function;
+}
+
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/table_filter_jnicallback.h b/src/rocksdb/java/rocksjni/table_filter_jnicallback.h
new file mode 100644
index 000000000..0ef404ca2
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/table_filter_jnicallback.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TableFilter.
+
+#ifndef JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include <functional>
+#include <memory>
+
+#include "rocksdb/table_properties.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class TableFilterJniCallback : public JniCallback {
+ public:
+  TableFilterJniCallback(JNIEnv* env, jobject jtable_filter);
+  std::function<bool(const ROCKSDB_NAMESPACE::TableProperties&)>
+  GetTableFilterFunction();
+
+ private:
+  jmethodID m_jfilter_methodid;
+  std::function<bool(const ROCKSDB_NAMESPACE::TableProperties&)>
+      m_table_filter_function;
+};
+
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif  // JAVA_ROCKSJNI_TABLE_FILTER_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/testable_event_listener.cc b/src/rocksdb/java/rocksjni/testable_event_listener.cc
new file mode 100644
index 000000000..71188bc3c
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/testable_event_listener.cc
@@ -0,0 +1,219 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+#include <climits>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "include/org_rocksdb_test_TestableEventListener.h"
+#include "rocksdb/listener.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table_properties.h"
+
+using ROCKSDB_NAMESPACE::BackgroundErrorReason;
+using ROCKSDB_NAMESPACE::CompactionJobInfo;
+using ROCKSDB_NAMESPACE::CompactionJobStats;
+using ROCKSDB_NAMESPACE::CompactionReason;
+using ROCKSDB_NAMESPACE::CompressionType;
+using ROCKSDB_NAMESPACE::ExternalFileIngestionInfo;
+using ROCKSDB_NAMESPACE::FileOperationInfo;
+using ROCKSDB_NAMESPACE::FileOperationType;
+using ROCKSDB_NAMESPACE::FlushJobInfo;
+using ROCKSDB_NAMESPACE::FlushReason;
+using ROCKSDB_NAMESPACE::MemTableInfo;
+using ROCKSDB_NAMESPACE::Status;
+using ROCKSDB_NAMESPACE::TableFileCreationBriefInfo;
+using ROCKSDB_NAMESPACE::TableFileCreationInfo;
+using ROCKSDB_NAMESPACE::TableFileCreationReason;
+using ROCKSDB_NAMESPACE::TableFileDeletionInfo;
+using ROCKSDB_NAMESPACE::TableProperties;
+using ROCKSDB_NAMESPACE::WriteStallCondition;
+using ROCKSDB_NAMESPACE::WriteStallInfo;
+
+static TableProperties newTablePropertiesForTest() {
+  TableProperties table_properties;
+  table_properties.data_size = UINT64_MAX;
+  table_properties.index_size = UINT64_MAX;
+  table_properties.index_partitions = UINT64_MAX;
+  table_properties.top_level_index_size = UINT64_MAX;
+  table_properties.index_key_is_user_key = UINT64_MAX;
+  table_properties.index_value_is_delta_encoded = UINT64_MAX;
+  table_properties.filter_size = UINT64_MAX;
+  table_properties.raw_key_size = UINT64_MAX;
+  table_properties.raw_value_size = UINT64_MAX;
+  table_properties.num_data_blocks = UINT64_MAX;
+  table_properties.num_entries = UINT64_MAX;
+  table_properties.num_deletions = UINT64_MAX;
+  table_properties.num_merge_operands = UINT64_MAX;
+  table_properties.num_range_deletions = UINT64_MAX;
+  table_properties.format_version = UINT64_MAX;
+  table_properties.fixed_key_len = UINT64_MAX;
+  table_properties.column_family_id = UINT64_MAX;
+  table_properties.creation_time = UINT64_MAX;
+  table_properties.oldest_key_time = UINT64_MAX;
+  table_properties.file_creation_time = UINT64_MAX;
+  table_properties.slow_compression_estimated_data_size = UINT64_MAX;
+  table_properties.fast_compression_estimated_data_size = UINT64_MAX;
+  table_properties.external_sst_file_global_seqno_offset = UINT64_MAX;
+  table_properties.db_id = "dbId";
+  table_properties.db_session_id = "sessionId";
+  table_properties.column_family_name = "columnFamilyName";
+  table_properties.filter_policy_name = "filterPolicyName";
+  table_properties.comparator_name = "comparatorName";
+  table_properties.merge_operator_name = "mergeOperatorName";
+  table_properties.prefix_extractor_name = "prefixExtractorName";
+  table_properties.property_collectors_names = "propertyCollectorsNames";
+  table_properties.compression_name = "compressionName";
+  table_properties.compression_options = "compressionOptions";
+  table_properties.user_collected_properties = {{"key", "value"}};
+  table_properties.readable_properties = {{"key", "value"}};
+  return table_properties;
+}
+
+/*
+ * Class: org_rocksdb_test_TestableEventListener
+ * Method: invokeAllCallbacks
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_test_TestableEventListener_invokeAllCallbacks(
+    JNIEnv *, jclass, jlong jhandle) {
+  const auto &el =
+      *reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::EventListener> *>(
+          jhandle);
+
+  TableProperties table_properties = newTablePropertiesForTest();
+
+  FlushJobInfo flush_job_info;
+  flush_job_info.cf_id = INT_MAX;
+  flush_job_info.cf_name = "testColumnFamily";
+  flush_job_info.file_path = "/file/path";
+  flush_job_info.file_number = UINT64_MAX;
+  flush_job_info.oldest_blob_file_number = UINT64_MAX;
+  flush_job_info.thread_id = UINT64_MAX;
+  flush_job_info.job_id = INT_MAX;
+  flush_job_info.triggered_writes_slowdown = true;
+  flush_job_info.triggered_writes_stop = true;
+  flush_job_info.smallest_seqno = UINT64_MAX;
+  flush_job_info.largest_seqno = UINT64_MAX;
+  flush_job_info.table_properties = table_properties;
+  flush_job_info.flush_reason = FlushReason::kManualFlush;
+
+  el->OnFlushCompleted(nullptr, flush_job_info);
+  el->OnFlushBegin(nullptr, flush_job_info);
+
+  Status status = Status::Incomplete(Status::SubCode::kNoSpace);
+
+  TableFileDeletionInfo file_deletion_info;
+  file_deletion_info.db_name = "dbName";
+  file_deletion_info.file_path = "/file/path";
+  file_deletion_info.job_id = INT_MAX;
+  file_deletion_info.status = status;
+
+  el->OnTableFileDeleted(file_deletion_info);
+
+  CompactionJobInfo compaction_job_info;
+  compaction_job_info.cf_id = UINT32_MAX;
+  compaction_job_info.cf_name = "compactionColumnFamily";
+  compaction_job_info.status = status;
+  compaction_job_info.thread_id = UINT64_MAX;
+  compaction_job_info.job_id = INT_MAX;
+  compaction_job_info.base_input_level = INT_MAX;
+  compaction_job_info.output_level = INT_MAX;
+  compaction_job_info.input_files = {"inputFile.sst"};
+  compaction_job_info.input_file_infos = {};
+  compaction_job_info.output_files = {"outputFile.sst"};
+  compaction_job_info.output_file_infos = {};
+  compaction_job_info.table_properties = {
+      {"tableProperties", std::shared_ptr<TableProperties>(
+                              &table_properties, [](TableProperties *) {})}};
+  compaction_job_info.compaction_reason = CompactionReason::kFlush;
+  compaction_job_info.compression = CompressionType::kSnappyCompression;
+
+  compaction_job_info.stats = CompactionJobStats();
+
+  el->OnCompactionBegin(nullptr, compaction_job_info);
+  el->OnCompactionCompleted(nullptr, compaction_job_info);
+
+  TableFileCreationInfo file_creation_info;
+  file_creation_info.file_size = UINT64_MAX;
+  file_creation_info.table_properties = table_properties;
+  file_creation_info.status = status;
+  file_creation_info.file_checksum = "fileChecksum";
+  file_creation_info.file_checksum_func_name = "fileChecksumFuncName";
+  file_creation_info.db_name = "dbName";
+  file_creation_info.cf_name = "columnFamilyName";
+  file_creation_info.file_path = "/file/path";
+  file_creation_info.job_id = INT_MAX;
+  file_creation_info.reason = TableFileCreationReason::kMisc;
+
+  el->OnTableFileCreated(file_creation_info);
+
+  TableFileCreationBriefInfo file_creation_brief_info;
+  file_creation_brief_info.db_name = "dbName";
+  file_creation_brief_info.cf_name = "columnFamilyName";
+  file_creation_brief_info.file_path = "/file/path";
+  file_creation_brief_info.job_id = INT_MAX;
+  file_creation_brief_info.reason = TableFileCreationReason::kMisc;
+
+  el->OnTableFileCreationStarted(file_creation_brief_info);
+
+  MemTableInfo mem_table_info;
+  mem_table_info.cf_name = "columnFamilyName";
+  mem_table_info.first_seqno = UINT64_MAX;
+  mem_table_info.earliest_seqno = UINT64_MAX;
+  mem_table_info.num_entries = UINT64_MAX;
+  mem_table_info.num_deletes = UINT64_MAX;
+
+  el->OnMemTableSealed(mem_table_info);
+  el->OnColumnFamilyHandleDeletionStarted(nullptr);
+
+  ExternalFileIngestionInfo file_ingestion_info;
+  file_ingestion_info.cf_name = "columnFamilyName";
+  file_ingestion_info.external_file_path = "/external/file/path";
+  file_ingestion_info.internal_file_path = "/internal/file/path";
+  file_ingestion_info.global_seqno = UINT64_MAX;
+  file_ingestion_info.table_properties = table_properties;
+  el->OnExternalFileIngested(nullptr, file_ingestion_info);
+
+  el->OnBackgroundError(BackgroundErrorReason::kFlush, &status);
+
+  WriteStallInfo write_stall_info;
+  write_stall_info.cf_name = "columnFamilyName";
+  write_stall_info.condition.cur = WriteStallCondition::kDelayed;
+  write_stall_info.condition.prev = WriteStallCondition::kStopped;
+  el->OnStallConditionsChanged(write_stall_info);
+
+  const std::string file_path = "/file/path";
+  const auto start_timestamp =
+      std::make_pair(std::chrono::time_point<std::chrono::system_clock,
+                                             std::chrono::nanoseconds>(
+                         std::chrono::nanoseconds(1600699420000000000ll)),
+                     std::chrono::time_point<std::chrono::steady_clock,
+                                             std::chrono::nanoseconds>(
+                         std::chrono::nanoseconds(1600699420000000000ll)));
+  const auto finish_timestamp =
+      std::chrono::time_point<std::chrono::steady_clock,
+                              std::chrono::nanoseconds>(
+          std::chrono::nanoseconds(1600699425000000000ll));
+  FileOperationInfo op_info =
+      FileOperationInfo(FileOperationType::kRead, file_path, start_timestamp,
+                        finish_timestamp, status);
+  op_info.offset = UINT64_MAX;
+  op_info.length = SIZE_MAX;
+
+  el->OnFileReadFinish(op_info);
+  el->OnFileWriteFinish(op_info);
+  el->OnFileFlushFinish(op_info);
+  el->OnFileSyncFinish(op_info);
+  el->OnFileRangeSyncFinish(op_info);
+  el->OnFileTruncateFinish(op_info);
+  el->OnFileCloseFinish(op_info);
+  el->ShouldBeNotifiedOnFileIO();
+
+  bool auto_recovery;
+  el->OnErrorRecoveryBegin(BackgroundErrorReason::kFlush, status,
+                           &auto_recovery);
+  el->OnErrorRecoveryCompleted(status);
+}
diff --git a/src/rocksdb/java/rocksjni/thread_status.cc b/src/rocksdb/java/rocksjni/thread_status.cc
new file mode 100644
index 000000000..c600f6cd5
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/thread_status.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ ROCKSDB_NAMESPACE::ThreadStatus methods from Java side.
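The test driver above pokes every callback with synthetic data; a production listener normally overrides only what it needs. A minimal C++ sketch (the class name and logging body are illustrative):

    #include <iostream>

    #include "rocksdb/listener.h"

    class FlushLogger : public ROCKSDB_NAMESPACE::EventListener {
     public:
      void OnFlushCompleted(
          ROCKSDB_NAMESPACE::DB* /*db*/,
          const ROCKSDB_NAMESPACE::FlushJobInfo& info) override {
        std::cout << "flushed " << info.file_path << " for cf "
                  << info.cf_name << std::endl;
      }
    };
    // Registered via:
    //   options.listeners.emplace_back(std::make_shared<FlushLogger>());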
+
+#include "rocksdb/thread_status.h"
+
+#include <jni.h>
+
+#include "include/org_rocksdb_ThreadStatus.h"
+#include "portal.h"
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getThreadTypeName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getThreadTypeName(
+    JNIEnv* env, jclass, jbyte jthread_type_value) {
+  auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetThreadTypeName(
+      ROCKSDB_NAMESPACE::ThreadTypeJni::toCppThreadType(jthread_type_value));
+  return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationName(
+    JNIEnv* env, jclass, jbyte joperation_type_value) {
+  auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetOperationName(
+      ROCKSDB_NAMESPACE::OperationTypeJni::toCppOperationType(
+          joperation_type_value));
+  return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: microsToStringNative
+ * Signature: (J)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_microsToStringNative(JNIEnv* env, jclass,
+                                                           jlong jmicros) {
+  auto str = ROCKSDB_NAMESPACE::ThreadStatus::MicrosToString(
+      static_cast<uint64_t>(jmicros));
+  return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &str, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationStageName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationStageName(
+    JNIEnv* env, jclass, jbyte joperation_stage_value) {
+  auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetOperationStageName(
+      ROCKSDB_NAMESPACE::OperationStageJni::toCppOperationStage(
+          joperation_stage_value));
+  return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getOperationPropertyName
+ * Signature: (BI)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getOperationPropertyName(
+    JNIEnv* env, jclass, jbyte joperation_type_value, jint jindex) {
+  auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetOperationPropertyName(
+      ROCKSDB_NAMESPACE::OperationTypeJni::toCppOperationType(
+          joperation_type_value),
+      static_cast<int>(jindex));
+  return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: interpretOperationProperties
+ * Signature: (B[J)Ljava/util/Map;
+ */
+jobject Java_org_rocksdb_ThreadStatus_interpretOperationProperties(
+    JNIEnv* env, jclass, jbyte joperation_type_value,
+    jlongArray joperation_properties) {
+  // convert joperation_properties
+  const jsize len = env->GetArrayLength(joperation_properties);
+  const std::unique_ptr<uint64_t[]> op_properties(new uint64_t[len]);
+  jlong* jop = env->GetLongArrayElements(joperation_properties, nullptr);
+  if (jop == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+  for (jsize i = 0; i < len; i++) {
+    op_properties[i] = static_cast<uint64_t>(jop[i]);
+  }
+  env->ReleaseLongArrayElements(joperation_properties, jop, JNI_ABORT);
+
+  // call the function
+  auto result = ROCKSDB_NAMESPACE::ThreadStatus::InterpretOperationProperties(
+      ROCKSDB_NAMESPACE::OperationTypeJni::toCppOperationType(
+          joperation_type_value),
+      op_properties.get());
+  jobject jresult = ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &result);
+  if (env->ExceptionCheck()) {
+    // exception occurred
+    return nullptr;
+  }
+
+  return jresult;
+}
+
+/*
+ * Class: org_rocksdb_ThreadStatus
+ * Method: getStateName
+ * Signature: (B)Ljava/lang/String;
+ */
+jstring Java_org_rocksdb_ThreadStatus_getStateName(JNIEnv* env, jclass,
+                                                   jbyte jstate_type_value) {
+  auto name = ROCKSDB_NAMESPACE::ThreadStatus::GetStateName(
+      ROCKSDB_NAMESPACE::StateTypeJni::toCppStateType(jstate_type_value));
+  return ROCKSDB_NAMESPACE::JniUtil::toJavaString(env, &name, true);
+}
diff --git a/src/rocksdb/java/rocksjni/trace_writer.cc b/src/rocksdb/java/rocksjni/trace_writer.cc
new file mode 100644
index 000000000..d58276399
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/trace_writer.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TraceWriter.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractTraceWriter.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/trace_writer_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTraceWriter
+ * Method: createNewTraceWriter
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTraceWriter_createNewTraceWriter(JNIEnv* env,
+                                                                jobject jobj) {
+  auto* trace_writer = new ROCKSDB_NAMESPACE::TraceWriterJniCallback(env, jobj);
+  return GET_CPLUSPLUS_POINTER(trace_writer);
+}
diff --git a/src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc
new file mode 100644
index 000000000..d1ed32038
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TraceWriter.
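The callback below talks to its Java peer through a single jshort return value: the Status code rides in the high byte and the sub-code in the low byte, so the round trip needs only one primitive. A self-contained sketch of that packing scheme (the function names are illustrative; the Java half lives in AbstractTraceWriter):

    #include <cstdint>

    // Pack (code, sub_code) exactly the way Write()/Close() below unpack it.
    inline int16_t PackStatus(uint8_t code, uint8_t sub_code) {
      return static_cast<int16_t>((code << 8) | sub_code);
    }

    inline void UnpackStatus(int16_t packed, uint8_t* code,
                             uint8_t* sub_code) {
      *code = static_cast<uint8_t>((packed >> 8) & 0xFF);
      *sub_code = static_cast<uint8_t>(packed & 0xFF);
    }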
+
+#include "rocksjni/trace_writer_jnicallback.h"
+
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+TraceWriterJniCallback::TraceWriterJniCallback(JNIEnv* env,
+                                               jobject jtrace_writer)
+    : JniCallback(env, jtrace_writer) {
+  m_jwrite_proxy_methodid = AbstractTraceWriterJni::getWriteProxyMethodId(env);
+  if (m_jwrite_proxy_methodid == nullptr) {
+    // exception thrown: NoSuchMethodException or OutOfMemoryError
+    return;
+  }
+
+  m_jclose_writer_proxy_methodid =
+      AbstractTraceWriterJni::getCloseWriterProxyMethodId(env);
+  if (m_jclose_writer_proxy_methodid == nullptr) {
+    // exception thrown: NoSuchMethodException or OutOfMemoryError
+    return;
+  }
+
+  m_jget_file_size_methodid =
+      AbstractTraceWriterJni::getGetFileSizeMethodId(env);
+  if (m_jget_file_size_methodid == nullptr) {
+    // exception thrown: NoSuchMethodException or OutOfMemoryError
+    return;
+  }
+}
+
+Status TraceWriterJniCallback::Write(const Slice& data) {
+  jboolean attached_thread = JNI_FALSE;
+  JNIEnv* env = getJniEnv(&attached_thread);
+  if (env == nullptr) {
+    return Status::IOError("Unable to attach JNI Environment");
+  }
+
+  jshort jstatus =
+      env->CallShortMethod(m_jcallback_obj, m_jwrite_proxy_methodid, &data);
+
+  if (env->ExceptionCheck()) {
+    // exception thrown from CallShortMethod
+    env->ExceptionDescribe();  // print out exception to stderr
+    releaseJniEnv(attached_thread);
+    return Status::IOError(
+        "Unable to call AbstractTraceWriter#writeProxy(long)");
+  }
+
+  // unpack status code and status sub-code from jstatus
+  jbyte jcode_value = (jstatus >> 8) & 0xFF;
+  jbyte jsub_code_value = jstatus & 0xFF;
+  std::unique_ptr<Status> s =
+      StatusJni::toCppStatus(jcode_value, jsub_code_value);
+
+  releaseJniEnv(attached_thread);
+
+  return Status(*s);
+}
+
+Status TraceWriterJniCallback::Close() {
+  jboolean attached_thread = JNI_FALSE;
+  JNIEnv* env = getJniEnv(&attached_thread);
+  if (env == nullptr) {
+    return Status::IOError("Unable to attach JNI Environment");
+  }
+
+  jshort jstatus =
+      env->CallShortMethod(m_jcallback_obj, m_jclose_writer_proxy_methodid);
+
+  if (env->ExceptionCheck()) {
+    // exception thrown from CallShortMethod
+    env->ExceptionDescribe();  // print out exception to stderr
+    releaseJniEnv(attached_thread);
+    return Status::IOError(
+        "Unable to call AbstractTraceWriter#closeWriterProxy()");
+  }
+
+  // unpack status code and status sub-code from jstatus
+  jbyte code_value = (jstatus >> 8) & 0xFF;
+  jbyte sub_code_value = jstatus & 0xFF;
+  std::unique_ptr<Status> s =
+      StatusJni::toCppStatus(code_value, sub_code_value);
+
+  releaseJniEnv(attached_thread);
+
+  return Status(*s);
+}
+
+uint64_t TraceWriterJniCallback::GetFileSize() {
+  jboolean attached_thread = JNI_FALSE;
+  JNIEnv* env = getJniEnv(&attached_thread);
+  if (env == nullptr) {
+    return 0;
+  }
+
+  jlong jfile_size =
+      env->CallLongMethod(m_jcallback_obj, m_jget_file_size_methodid);
+
+  if (env->ExceptionCheck()) {
+    // exception thrown from CallLongMethod
+    env->ExceptionDescribe();  // print out exception to stderr
+    releaseJniEnv(attached_thread);
+    return 0;
+  }
+
+  releaseJniEnv(attached_thread);
+
+  return static_cast<uint64_t>(jfile_size);
+}
+
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/trace_writer_jnicallback.h b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.h
new file mode 100644
index 000000000..c82a3a72c
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/trace_writer_jnicallback.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TraceWriter.
+
+#ifndef JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include <memory>
+
+#include "rocksdb/trace_reader_writer.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+class TraceWriterJniCallback : public JniCallback, public TraceWriter {
+ public:
+  TraceWriterJniCallback(JNIEnv* env, jobject jtrace_writer);
+  virtual Status Write(const Slice& data);
+  virtual Status Close();
+  virtual uint64_t GetFileSize();
+
+ private:
+  jmethodID m_jwrite_proxy_methodid;
+  jmethodID m_jclose_writer_proxy_methodid;
+  jmethodID m_jget_file_size_methodid;
+};
+
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif  // JAVA_ROCKSJNI_TRACE_WRITER_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/transaction.cc b/src/rocksdb/java/rocksjni/transaction.cc
new file mode 100644
index 000000000..1a0a64fc7
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction.cc
@@ -0,0 +1,1655 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::Transaction.
+
+#include "rocksdb/utilities/transaction.h"
+
+#include <jni.h>
+
+#include <functional>
+
+#include "include/org_rocksdb_Transaction.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4503)  // identifier' : decorated name length
+                                 // exceeded, name was truncated
+#endif
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSnapshot
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_setSnapshot(JNIEnv* /*env*/, jobject /*jobj*/,
+                                              jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  txn->SetSnapshot();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSnapshotOnNextOperation
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_setSnapshotOnNextOperation__J(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  txn->SetSnapshotOnNextOperation(nullptr);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSnapshotOnNextOperation
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Transaction_setSnapshotOnNextOperation__JJ(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle,
+    jlong jtxn_notifier_handle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  auto* txn_notifier = reinterpret_cast<
+      std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>*>(
+      jtxn_notifier_handle);
+  txn->SetSnapshotOnNextOperation(*txn_notifier);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: getSnapshot
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Transaction_getSnapshot(JNIEnv* /*env*/,
+                                               jobject /*jobj*/,
+                                               jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  const ROCKSDB_NAMESPACE::Snapshot* snapshot = txn->GetSnapshot();
+  return GET_CPLUSPLUS_POINTER(snapshot);
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: clearSnapshot
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_clearSnapshot(JNIEnv* /*env*/,
+                                                jobject /*jobj*/,
+                                                jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  txn->ClearSnapshot();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: prepare
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_prepare(JNIEnv* env, jobject /*jobj*/,
+                                          jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  ROCKSDB_NAMESPACE::Status s = txn->Prepare();
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: commit
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_commit(JNIEnv* env, jobject /*jobj*/,
+                                         jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  ROCKSDB_NAMESPACE::Status s = txn->Commit();
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: rollback
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_rollback(JNIEnv* env, jobject /*jobj*/,
+                                           jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  ROCKSDB_NAMESPACE::Status s = txn->Rollback();
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: setSavePoint
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_setSavePoint(JNIEnv* /*env*/,
+                                               jobject /*jobj*/,
+                                               jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  txn->SetSavePoint();
+}
+
+/*
+ * Class: org_rocksdb_Transaction
+ * Method: rollbackToSavePoint
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Transaction_rollbackToSavePoint(JNIEnv* env,
+                                                      jobject /*jobj*/,
+                                                      jlong jhandle) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  ROCKSDB_NAMESPACE::Status s = txn->RollbackToSavePoint();
+  if (!s.ok()) {
+    ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+typedef std::function<ROCKSDB_NAMESPACE::Status(
+    const ROCKSDB_NAMESPACE::ReadOptions&, const ROCKSDB_NAMESPACE::Slice&,
+    std::string*)>
+    FnGet;
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+jbyteArray txn_get_helper(JNIEnv* env, const FnGet& fn_get,
+                          const jlong& jread_options_handle,
+                          const jbyteArray& jkey, const jint& jkey_part_len) {
+  jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+  if (key == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+  ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(key),
+                                     jkey_part_len);
+
+  auto* read_options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+  std::string value;
+  ROCKSDB_NAMESPACE::Status s = fn_get(*read_options, key_slice, &value);
+
+  // trigger java unref on key.
+  // by passing JNI_ABORT, it will simply release the reference without
+  // copying the result back to the java byte array.
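+  // (For reference, the standard JNI release modes are: JNI_ABORT = free the
+  //  native copy without copying back; 0 = copy back and free; JNI_COMMIT =
+  //  copy back without freeing. Only JNI_ABORT is needed here because the key
+  //  is read-only. This note is editorial and not part of the original patch.)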
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (s.IsNotFound()) { + return nullptr; + } + + if (s.ok()) { + jbyteArray jret_value = env->NewByteArray(static_cast(value.size())); + if (jret_value == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jret_value, 0, static_cast(value.size()), + const_cast(reinterpret_cast(value.c_str()))); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + return nullptr; + } + return jret_value; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; +} + +/* + * Class: org_rocksdb_Transaction + * Method: get + * Signature: (JJ[BIJ)[B + */ +jbyteArray Java_org_rocksdb_Transaction_get__JJ_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_part_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnGet fn_get = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::Get, txn, std::placeholders::_1, + column_family_handle, std::placeholders::_2, std::placeholders::_3); + return txn_get_helper(env, fn_get, jread_options_handle, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: get + * Signature: (JJ[BI)[B + */ +jbyteArray Java_org_rocksdb_Transaction_get__JJ_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnGet fn_get = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::Get, txn, std::placeholders::_1, + std::placeholders::_2, std::placeholders::_3); + return txn_get_helper(env, fn_get, jread_options_handle, jkey, jkey_part_len); +} + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +// used by txn_multi_get_helper below +std::vector txn_column_families_helper( + JNIEnv* env, jlongArray jcolumn_family_handles, bool* has_exception) { + std::vector cf_handles; + if (jcolumn_family_handles != nullptr) { + const jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + if (len_cols > 0) { + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, nullptr); + if (jcfh == nullptr) { + // exception thrown: OutOfMemoryError + *has_exception = JNI_TRUE; + return std::vector(); + } + for (int i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); + } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); + } + } + return cf_handles; +} + +typedef std::function( + const ROCKSDB_NAMESPACE::ReadOptions&, + const std::vector&, std::vector*)> + FnMultiGet; + +void free_parts( + JNIEnv* env, + std::vector>& parts_to_free) { + for (auto& value : parts_to_free) { + jobject jk; + jbyteArray jk_ba; + jbyte* jk_val; + std::tie(jk_ba, jk_val, jk) = value; + env->ReleaseByteArrayElements(jk_ba, jk_val, JNI_ABORT); + env->DeleteLocalRef(jk); + } +} + +void free_key_values(std::vector& keys_to_free) { + for (auto& key : keys_to_free) { + delete[] key; + } +} + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +// cf multi get +jobjectArray txn_multi_get_helper(JNIEnv* env, const FnMultiGet& fn_multi_get, + const jlong& jread_options_handle, + const jobjectArray& jkey_parts) { + const jsize len_key_parts = env->GetArrayLength(jkey_parts); + + std::vector key_parts; + std::vector keys_to_free; + for (int i = 0; i < 
len_key_parts; i++) {
+    const jobject jk = env->GetObjectArrayElement(jkey_parts, i);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException
+      free_key_values(keys_to_free);
+      return nullptr;
+    }
+    jbyteArray jk_ba = reinterpret_cast<jbyteArray>(jk);
+    const jsize len_key = env->GetArrayLength(jk_ba);
+    jbyte* jk_val = new jbyte[len_key];
+    if (jk_val == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->DeleteLocalRef(jk);
+      free_key_values(keys_to_free);
+
+      jclass exception_cls = (env)->FindClass("java/lang/OutOfMemoryError");
+      (env)->ThrowNew(exception_cls,
+                      "Insufficient Memory for CF handle array.");
+      return nullptr;
+    }
+    env->GetByteArrayRegion(jk_ba, 0, len_key, jk_val);
+
+    ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast<char*>(jk_val),
+                                       len_key);
+    key_parts.push_back(key_slice);
+    keys_to_free.push_back(jk_val);
+    env->DeleteLocalRef(jk);
+  }
+
+  auto* read_options =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jread_options_handle);
+  std::vector<std::string> value_parts;
+  std::vector<ROCKSDB_NAMESPACE::Status> s =
+      fn_multi_get(*read_options, key_parts, &value_parts);
+
+  // free up allocated byte arrays
+  free_key_values(keys_to_free);
+
+  // prepare the results
+  const jclass jcls_ba = env->FindClass("[B");
+  jobjectArray jresults =
+      env->NewObjectArray(static_cast<jsize>(s.size()), jcls_ba, nullptr);
+  if (jresults == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+
+  // add to the jresults
+  for (std::vector<ROCKSDB_NAMESPACE::Status>::size_type i = 0; i != s.size();
+       i++) {
+    if (s[i].ok()) {
+      jbyteArray jentry_value =
+          env->NewByteArray(static_cast<jsize>(value_parts[i].size()));
+      if (jentry_value == nullptr) {
+        // exception thrown: OutOfMemoryError
+        return nullptr;
+      }
+
+      env->SetByteArrayRegion(
+          jentry_value, 0, static_cast<jsize>(value_parts[i].size()),
+          const_cast<jbyte*>(
+              reinterpret_cast<const jbyte*>(value_parts[i].c_str())));
+      if (env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException
+        env->DeleteLocalRef(jentry_value);
+        return nullptr;
+      }
+
+      env->SetObjectArrayElement(jresults, static_cast<jsize>(i),
+                                 jentry_value);
+      env->DeleteLocalRef(jentry_value);
+    }
+  }
+
+  return jresults;
+}
+
+/*
+ * Class:     org_rocksdb_Transaction
+ * Method:    multiGet
+ * Signature: (JJ[[B[J)[[B
+ */
+jobjectArray Java_org_rocksdb_Transaction_multiGet__JJ_3_3B_3J(
+    JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+    jobjectArray jkey_parts, jlongArray jcolumn_family_handles) {
+  bool has_exception = false;
+  const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>
+      column_family_handles = txn_column_families_helper(
+          env, jcolumn_family_handles, &has_exception);
+  if (has_exception) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  FnMultiGet fn_multi_get = std::bind<std::vector<ROCKSDB_NAMESPACE::Status> (
+      ROCKSDB_NAMESPACE::Transaction::*)(
+      const ROCKSDB_NAMESPACE::ReadOptions&,
+      const std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>&,
+      const std::vector<ROCKSDB_NAMESPACE::Slice>&, std::vector<std::string>*)>(
+      &ROCKSDB_NAMESPACE::Transaction::MultiGet, txn, std::placeholders::_1,
+      column_family_handles, std::placeholders::_2, std::placeholders::_3);
+  return txn_multi_get_helper(env, fn_multi_get, jread_options_handle,
+                              jkey_parts);
+}
+
+/*
+ * Class:     org_rocksdb_Transaction
+ * Method:    multiGet
+ * Signature: (JJ[[B)[[B
+ */
+jobjectArray Java_org_rocksdb_Transaction_multiGet__JJ_3_3B(
+    JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle,
+    jobjectArray jkey_parts) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  FnMultiGet fn_multi_get = std::bind<std::vector<ROCKSDB_NAMESPACE::Status> (
+      ROCKSDB_NAMESPACE::Transaction::*)(
+      const ROCKSDB_NAMESPACE::ReadOptions&,
+      const std::vector<ROCKSDB_NAMESPACE::Slice>&, std::vector<std::string>*)>(
&ROCKSDB_NAMESPACE::Transaction::MultiGet, txn, std::placeholders::_1, + std::placeholders::_2, std::placeholders::_3); + return txn_multi_get_helper(env, fn_multi_get, jread_options_handle, + jkey_parts); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getForUpdate + * Signature: (JJ[BIJZZ)[B + */ +jbyteArray Java_org_rocksdb_Transaction_getForUpdate__JJ_3BIJZZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_part_len, jlong jcolumn_family_handle, + jboolean jexclusive, jboolean jdo_validate) { + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + auto* txn = reinterpret_cast(jhandle); + FnGet fn_get_for_update = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::GetForUpdate, txn, + std::placeholders::_1, column_family_handle, std::placeholders::_2, + std::placeholders::_3, jexclusive, jdo_validate); + return txn_get_helper(env, fn_get_for_update, jread_options_handle, jkey, + jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getForUpdate + * Signature: (JJ[BIZZ)[B + */ +jbyteArray Java_org_rocksdb_Transaction_getForUpdate__JJ_3BIZZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_part_len, jboolean jexclusive, + jboolean jdo_validate) { + auto* txn = reinterpret_cast(jhandle); + FnGet fn_get_for_update = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::GetForUpdate, txn, + std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, + jexclusive, jdo_validate); + return txn_get_helper(env, fn_get_for_update, jread_options_handle, jkey, + jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: multiGetForUpdate + * Signature: (JJ[[B[J)[[B + */ +jobjectArray Java_org_rocksdb_Transaction_multiGetForUpdate__JJ_3_3B_3J( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle, + jobjectArray jkey_parts, jlongArray jcolumn_family_handles) { + bool has_exception = false; + const std::vector + column_family_handles = txn_column_families_helper( + env, jcolumn_family_handles, &has_exception); + if (has_exception) { + // exception thrown: OutOfMemoryError + return nullptr; + } + auto* txn = reinterpret_cast(jhandle); + FnMultiGet fn_multi_get_for_update = std::bind (ROCKSDB_NAMESPACE::Transaction::*)( + const ROCKSDB_NAMESPACE::ReadOptions&, + const std::vector&, + const std::vector&, std::vector*)>( + &ROCKSDB_NAMESPACE::Transaction::MultiGetForUpdate, txn, + std::placeholders::_1, column_family_handles, std::placeholders::_2, + std::placeholders::_3); + return txn_multi_get_helper(env, fn_multi_get_for_update, + jread_options_handle, jkey_parts); +} + +/* + * Class: org_rocksdb_Transaction + * Method: multiGetForUpdate + * Signature: (JJ[[B)[[B + */ +jobjectArray Java_org_rocksdb_Transaction_multiGetForUpdate__JJ_3_3B( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jread_options_handle, + jobjectArray jkey_parts) { + auto* txn = reinterpret_cast(jhandle); + FnMultiGet fn_multi_get_for_update = std::bind (ROCKSDB_NAMESPACE::Transaction::*)( + const ROCKSDB_NAMESPACE::ReadOptions&, + const std::vector&, std::vector*)>( + &ROCKSDB_NAMESPACE::Transaction::MultiGetForUpdate, txn, + std::placeholders::_1, std::placeholders::_2, std::placeholders::_3); + return txn_multi_get_helper(env, fn_multi_get_for_update, + jread_options_handle, jkey_parts); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getIterator + * Signature: (JJ)J + */ +jlong 
Java_org_rocksdb_Transaction_getIterator__JJ(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jlong jread_options_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* read_options = + reinterpret_cast(jread_options_handle); + return GET_CPLUSPLUS_POINTER(txn->GetIterator(*read_options)); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getIterator + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_Transaction_getIterator__JJJ( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jread_options_handle, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* read_options = + reinterpret_cast(jread_options_handle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + return GET_CPLUSPLUS_POINTER( + txn->GetIterator(*read_options, column_family_handle)); +} + +typedef std::function + FnWriteKV; + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +void txn_write_kv_helper(JNIEnv* env, const FnWriteKV& fn_write_kv, + const jbyteArray& jkey, const jint& jkey_part_len, + const jbyteArray& jval, const jint& jval_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + jbyte* value = env->GetByteArrayElements(jval, nullptr); + if (value == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + return; + } + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), + jkey_part_len); + ROCKSDB_NAMESPACE::Slice value_slice(reinterpret_cast(value), + jval_len); + + ROCKSDB_NAMESPACE::Status s = fn_write_kv(key_slice, value_slice); + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. 
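+  //
+  // How callers compose this helper (illustrative sketch): each JNI entry
+  // point partially applies one Transaction write method with std::bind, so
+  // this single body serves Put/Merge/PutUntracked, with and without a
+  // column family, e.g.:
+  //
+  //   FnWriteKV fn = std::bind<ROCKSDB_NAMESPACE::Status (
+  //       ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::Slice&,
+  //                                          const ROCKSDB_NAMESPACE::Slice&)>(
+  //       &ROCKSDB_NAMESPACE::Transaction::Merge, txn, std::placeholders::_1,
+  //       std::placeholders::_2);
+  //   txn_write_kv_helper(env, fn, jkey, jkey_part_len, jval, jval_len);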
+  env->ReleaseByteArrayElements(jval, value, JNI_ABORT);
+  env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+
+  if (s.ok()) {
+    return;
+  }
+  ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+/*
+ * Class:     org_rocksdb_Transaction
+ * Method:    put
+ * Signature: (J[BI[BIJZ)V
+ */
+void Java_org_rocksdb_Transaction_put__J_3BI_3BIJZ(
+    JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey,
+    jint jkey_part_len, jbyteArray jval, jint jval_len,
+    jlong jcolumn_family_handle, jboolean jassume_tracked) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  auto* column_family_handle =
+      reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(
+          jcolumn_family_handle);
+  FnWriteKV fn_put = std::bind<ROCKSDB_NAMESPACE::Status (
+      ROCKSDB_NAMESPACE::Transaction::*)(
+      ROCKSDB_NAMESPACE::ColumnFamilyHandle*, const ROCKSDB_NAMESPACE::Slice&,
+      const ROCKSDB_NAMESPACE::Slice&, bool)>(
+      &ROCKSDB_NAMESPACE::Transaction::Put, txn, column_family_handle,
+      std::placeholders::_1, std::placeholders::_2, jassume_tracked);
+  txn_write_kv_helper(env, fn_put, jkey, jkey_part_len, jval, jval_len);
+}
+
+/*
+ * Class:     org_rocksdb_Transaction
+ * Method:    put
+ * Signature: (J[BI[BI)V
+ */
+void Java_org_rocksdb_Transaction_put__J_3BI_3BI(JNIEnv* env, jobject /*jobj*/,
+                                                 jlong jhandle, jbyteArray jkey,
+                                                 jint jkey_part_len,
+                                                 jbyteArray jval,
+                                                 jint jval_len) {
+  auto* txn = reinterpret_cast<ROCKSDB_NAMESPACE::Transaction*>(jhandle);
+  FnWriteKV fn_put = std::bind<ROCKSDB_NAMESPACE::Status (
+      ROCKSDB_NAMESPACE::Transaction::*)(const ROCKSDB_NAMESPACE::Slice&,
+                                         const ROCKSDB_NAMESPACE::Slice&)>(
+      &ROCKSDB_NAMESPACE::Transaction::Put, txn, std::placeholders::_1,
+      std::placeholders::_2);
+  txn_write_kv_helper(env, fn_put, jkey, jkey_part_len, jval, jval_len);
+}
+
+typedef std::function<ROCKSDB_NAMESPACE::Status(
+    const ROCKSDB_NAMESPACE::SliceParts&, const ROCKSDB_NAMESPACE::SliceParts&)>
+    FnWriteKVParts;
+
+// TODO(AR) consider refactoring to share this between here and rocksjni.cc
+void txn_write_kv_parts_helper(JNIEnv* env,
+                               const FnWriteKVParts& fn_write_kv_parts,
+                               const jobjectArray& jkey_parts,
+                               const jint& jkey_parts_len,
+                               const jobjectArray& jvalue_parts,
+                               const jint& jvalue_parts_len) {
+#ifndef DEBUG
+  (void)jvalue_parts_len;
+#else
+  assert(jkey_parts_len == jvalue_parts_len);
+#endif
+
+  auto key_parts = std::vector<ROCKSDB_NAMESPACE::Slice>();
+  auto value_parts = std::vector<ROCKSDB_NAMESPACE::Slice>();
+  auto jparts_to_free = std::vector<std::tuple<jbyteArray, jbyte*, jobject>>();
+
+  // Since this is fundamentally a gather write at the RocksDB level,
+  // it seems wrong to refactor it by copying (gathering) keys and data here,
+  // in order to avoid the local reference limit.
+  // The user needs to be aware that there is a limit to the number of parts
+  // which can be gathered.
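+  //
+  // Background for the capacity check below: JNI only guarantees 16 live
+  // local references per native frame unless more are reserved. Each loop
+  // iteration keeps two local references alive (one key part, one value
+  // part) until free_parts() runs, hence the up-front reservation of
+  // jkey_parts_len + jvalue_parts_len via EnsureLocalCapacity().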
+ if (env->EnsureLocalCapacity(jkey_parts_len + jvalue_parts_len) != 0) { + // no space for all the jobjects we store up + env->ExceptionClear(); + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew( + env, "Insufficient JNI local references for " + + std::to_string(jkey_parts_len) + " key/value parts"); + return; + } + + // convert java key_parts/value_parts byte[][] to Slice(s) + for (jsize i = 0; i < jkey_parts_len; ++i) { + const jobject jobj_key_part = env->GetObjectArrayElement(jkey_parts, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + free_parts(env, jparts_to_free); + return; + } + const jobject jobj_value_part = env->GetObjectArrayElement(jvalue_parts, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jobj_key_part); + free_parts(env, jparts_to_free); + return; + } + + const jbyteArray jba_key_part = reinterpret_cast(jobj_key_part); + const jsize jkey_part_len = env->GetArrayLength(jba_key_part); + jbyte* jkey_part = env->GetByteArrayElements(jba_key_part, nullptr); + if (jkey_part == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jobj_value_part); + env->DeleteLocalRef(jobj_key_part); + free_parts(env, jparts_to_free); + return; + } + + const jbyteArray jba_value_part = + reinterpret_cast(jobj_value_part); + const jsize jvalue_part_len = env->GetArrayLength(jba_value_part); + jbyte* jvalue_part = env->GetByteArrayElements(jba_value_part, nullptr); + if (jvalue_part == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jobj_value_part); + env->DeleteLocalRef(jobj_key_part); + env->ReleaseByteArrayElements(jba_key_part, jkey_part, JNI_ABORT); + free_parts(env, jparts_to_free); + return; + } + + jparts_to_free.push_back( + std::make_tuple(jba_key_part, jkey_part, jobj_key_part)); + jparts_to_free.push_back( + std::make_tuple(jba_value_part, jvalue_part, jobj_value_part)); + + key_parts.push_back(ROCKSDB_NAMESPACE::Slice( + reinterpret_cast(jkey_part), jkey_part_len)); + value_parts.push_back(ROCKSDB_NAMESPACE::Slice( + reinterpret_cast(jvalue_part), jvalue_part_len)); + } + + // call the write_multi function + ROCKSDB_NAMESPACE::Status s = fn_write_kv_parts( + ROCKSDB_NAMESPACE::SliceParts(key_parts.data(), (int)key_parts.size()), + ROCKSDB_NAMESPACE::SliceParts(value_parts.data(), + (int)value_parts.size())); + + // cleanup temporary memory + free_parts(env, jparts_to_free); + + // return + if (s.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_Transaction + * Method: put + * Signature: (J[[BI[[BIJZ)V + */ +void Java_org_rocksdb_Transaction_put__J_3_3BI_3_3BIJZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len, + jlong jcolumn_family_handle, jboolean jassume_tracked) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKVParts fn_put_parts = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::Put, txn, column_family_handle, + std::placeholders::_1, std::placeholders::_2, jassume_tracked); + txn_write_kv_parts_helper(env, fn_put_parts, jkey_parts, jkey_parts_len, + jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: put + * Signature: (J[[BI[[BI)V + */ +void Java_org_rocksdb_Transaction_put__J_3_3BI_3_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray 
jkey_parts, + jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKVParts fn_put_parts = std::bind( + &ROCKSDB_NAMESPACE::Transaction::Put, txn, std::placeholders::_1, + std::placeholders::_2); + txn_write_kv_parts_helper(env, fn_put_parts, jkey_parts, jkey_parts_len, + jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: merge + * Signature: (J[BI[BIJZ)V + */ +void Java_org_rocksdb_Transaction_merge__J_3BI_3BIJZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jbyteArray jval, jint jval_len, + jlong jcolumn_family_handle, jboolean jassume_tracked) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKV fn_merge = + std::bind(&ROCKSDB_NAMESPACE::Transaction::Merge, txn, + column_family_handle, std::placeholders::_1, + std::placeholders::_2, jassume_tracked); + txn_write_kv_helper(env, fn_merge, jkey, jkey_part_len, jval, jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: merge + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_Transaction_merge__J_3BI_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jbyteArray jval, jint jval_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKV fn_merge = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::Merge, txn, std::placeholders::_1, + std::placeholders::_2); + txn_write_kv_helper(env, fn_merge, jkey, jkey_part_len, jval, jval_len); +} + +typedef std::function + FnWriteK; + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +void txn_write_k_helper(JNIEnv* env, const FnWriteK& fn_write_k, + const jbyteArray& jkey, const jint& jkey_part_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), + jkey_part_len); + + ROCKSDB_NAMESPACE::Status s = fn_write_k(key_slice); + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. 
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (s.ok()) { + return; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[BIJZ)V + */ +void Java_org_rocksdb_Transaction_delete__J_3BIJZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jlong jcolumn_family_handle, jboolean jassume_tracked) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteK fn_delete = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::Delete, txn, column_family_handle, + std::placeholders::_1, jassume_tracked); + txn_write_k_helper(env, fn_delete, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_delete__J_3BI(JNIEnv* env, jobject /*jobj*/, + jlong jhandle, jbyteArray jkey, + jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteK fn_delete = std::bind( + &ROCKSDB_NAMESPACE::Transaction::Delete, txn, std::placeholders::_1); + txn_write_k_helper(env, fn_delete, jkey, jkey_part_len); +} + +typedef std::function + FnWriteKParts; + +// TODO(AR) consider refactoring to share this between here and rocksjni.cc +void txn_write_k_parts_helper(JNIEnv* env, + const FnWriteKParts& fn_write_k_parts, + const jobjectArray& jkey_parts, + const jint& jkey_parts_len) { + std::vector key_parts; + std::vector> jkey_parts_to_free; + + // convert java key_parts byte[][] to Slice(s) + for (jint i = 0; i < jkey_parts_len; ++i) { + const jobject jobj_key_part = env->GetObjectArrayElement(jkey_parts, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + free_parts(env, jkey_parts_to_free); + return; + } + + const jbyteArray jba_key_part = reinterpret_cast(jobj_key_part); + const jsize jkey_part_len = env->GetArrayLength(jba_key_part); + jbyte* jkey_part = env->GetByteArrayElements(jba_key_part, nullptr); + if (jkey_part == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jobj_key_part); + free_parts(env, jkey_parts_to_free); + return; + } + + jkey_parts_to_free.push_back(std::tuple( + jba_key_part, jkey_part, jobj_key_part)); + + key_parts.push_back(ROCKSDB_NAMESPACE::Slice( + reinterpret_cast(jkey_part), jkey_part_len)); + } + + // call the write_multi function + ROCKSDB_NAMESPACE::Status s = fn_write_k_parts( + ROCKSDB_NAMESPACE::SliceParts(key_parts.data(), (int)key_parts.size())); + + // cleanup temporary memory + free_parts(env, jkey_parts_to_free); + + // return + if (s.ok()) { + return; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[[BIJZ)V + */ +void Java_org_rocksdb_Transaction_delete__J_3_3BIJZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len, jlong jcolumn_family_handle, + jboolean jassume_tracked) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKParts fn_delete_parts = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::Delete, txn, column_family_handle, + std::placeholders::_1, jassume_tracked); + txn_write_k_parts_helper(env, fn_delete_parts, jkey_parts, jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: delete + * Signature: (J[[BI)V + */ +void Java_org_rocksdb_Transaction_delete__J_3_3BI(JNIEnv* env, 
jobject /*jobj*/, + jlong jhandle, + jobjectArray jkey_parts, + jint jkey_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKParts fn_delete_parts = std::bind( + &ROCKSDB_NAMESPACE::Transaction::Delete, txn, std::placeholders::_1); + txn_write_k_parts_helper(env, fn_delete_parts, jkey_parts, jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[BIJZ)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3BIJZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jlong jcolumn_family_handle, jboolean jassume_tracked) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteK fn_single_delete = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn, + column_family_handle, std::placeholders::_1, jassume_tracked); + txn_write_k_helper(env, fn_single_delete, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3BI(JNIEnv* env, + jobject /*jobj*/, + jlong jhandle, + jbyteArray jkey, + jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteK fn_single_delete = std::bind( + &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn, + std::placeholders::_1); + txn_write_k_helper(env, fn_single_delete, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[[BIJZ)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3_3BIJZ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len, jlong jcolumn_family_handle, + jboolean jassume_tracked) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKParts fn_single_delete_parts = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn, + column_family_handle, std::placeholders::_1, jassume_tracked); + txn_write_k_parts_helper(env, fn_single_delete_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: singleDelete + * Signature: (J[[BI)V + */ +void Java_org_rocksdb_Transaction_singleDelete__J_3_3BI(JNIEnv* env, + jobject /*jobj*/, + jlong jhandle, + jobjectArray jkey_parts, + jint jkey_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKParts fn_single_delete_parts = std::bind( + &ROCKSDB_NAMESPACE::Transaction::SingleDelete, txn, + std::placeholders::_1); + txn_write_k_parts_helper(env, fn_single_delete_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3BI_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jbyteArray jval, jint jval_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKV fn_put_untracked = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn, + column_family_handle, std::placeholders::_1, std::placeholders::_2); + txn_write_kv_helper(env, fn_put_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3BI_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, 
jbyteArray jkey, + jint jkey_part_len, jbyteArray jval, jint jval_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKV fn_put_untracked = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn, + std::placeholders::_1, std::placeholders::_2); + txn_write_kv_helper(env, fn_put_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[[BI[[BIJ)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3_3BI_3_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKVParts fn_put_parts_untracked = std::bind( + &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn, column_family_handle, + std::placeholders::_1, std::placeholders::_2); + txn_write_kv_parts_helper(env, fn_put_parts_untracked, jkey_parts, + jkey_parts_len, jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putUntracked + * Signature: (J[[BI[[BI)V + */ +void Java_org_rocksdb_Transaction_putUntracked__J_3_3BI_3_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len, jobjectArray jvalue_parts, jint jvalue_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKVParts fn_put_parts_untracked = std::bind( + &ROCKSDB_NAMESPACE::Transaction::PutUntracked, txn, std::placeholders::_1, + std::placeholders::_2); + txn_write_kv_parts_helper(env, fn_put_parts_untracked, jkey_parts, + jkey_parts_len, jvalue_parts, jvalue_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: mergeUntracked + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_Transaction_mergeUntracked__J_3BI_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jbyteArray jval, jint jval_len, + jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKV fn_merge_untracked = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::MergeUntracked, txn, + column_family_handle, std::placeholders::_1, std::placeholders::_2); + txn_write_kv_helper(env, fn_merge_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: mergeUntracked + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_Transaction_mergeUntracked__J_3BI_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jbyteArray jval, jint jval_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKV fn_merge_untracked = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::MergeUntracked, txn, + std::placeholders::_1, std::placeholders::_2); + txn_write_kv_helper(env, fn_merge_untracked, jkey, jkey_part_len, jval, + jval_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteK fn_delete_untracked = std::bind( + &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn, + column_family_handle, std::placeholders::_1); + 
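+  // Note: the *Untracked variants bypass conflict tracking; the key is not
+  // registered with the transaction's lock/validation state, so the caller
+  // must know the write cannot conflict (see Transaction::PutUntracked() /
+  // DeleteUntracked() in rocksdb/utilities/transaction.h).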
txn_write_k_helper(env, fn_delete_untracked, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3BI(JNIEnv* env, + jobject /*jobj*/, + jlong jhandle, + jbyteArray jkey, + jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteK fn_delete_untracked = std::bind( + &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn, + std::placeholders::_1); + txn_write_k_helper(env, fn_delete_untracked, jkey, jkey_part_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[[BIJ)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + FnWriteKParts fn_delete_untracked_parts = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn, + column_family_handle, std::placeholders::_1); + txn_write_k_parts_helper(env, fn_delete_untracked_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: deleteUntracked + * Signature: (J[[BI)V + */ +void Java_org_rocksdb_Transaction_deleteUntracked__J_3_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jobjectArray jkey_parts, + jint jkey_parts_len) { + auto* txn = reinterpret_cast(jhandle); + FnWriteKParts fn_delete_untracked_parts = + std::bind( + &ROCKSDB_NAMESPACE::Transaction::DeleteUntracked, txn, + std::placeholders::_1); + txn_write_k_parts_helper(env, fn_delete_untracked_parts, jkey_parts, + jkey_parts_len); +} + +/* + * Class: org_rocksdb_Transaction + * Method: putLogData + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_putLogData(JNIEnv* env, jobject /*jobj*/, + jlong jhandle, jbyteArray jkey, + jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), + jkey_part_len); + txn->PutLogData(key_slice); + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. 
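+  //
+  // Note: PutLogData() appends a WAL-only blob; it never reaches the
+  // memtable or SST files and cannot be read back with Get(). A replication
+  // consumer would typically observe it when iterating the write-ahead log,
+  // e.g. via a WriteBatch::Handler whose LogData(const Slice&) override is
+  // invoked for each such blob.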
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); +} + +/* + * Class: org_rocksdb_Transaction + * Method: disableIndexing + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_disableIndexing(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->DisableIndexing(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: enableIndexing + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_enableIndexing(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + txn->EnableIndexing(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumKeys + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumKeys(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumKeys(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumPuts + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumPuts(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumPuts(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumDeletes + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumDeletes(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumDeletes(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getNumMerges + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getNumMerges(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetNumMerges(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getElapsedTime + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getElapsedTime(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetElapsedTime(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getWriteBatch + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getWriteBatch(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return GET_CPLUSPLUS_POINTER(txn->GetWriteBatch()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_setLockTimeout(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jlong jlock_timeout) { + auto* txn = reinterpret_cast(jhandle); + txn->SetLockTimeout(jlock_timeout); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getWriteOptions + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getWriteOptions(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return GET_CPLUSPLUS_POINTER(txn->GetWriteOptions()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setWriteOptions + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_setWriteOptions(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jlong jwrite_options_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + txn->SetWriteOptions(*write_options); +} + +/* + * Class: org_rocksdb_Transaction + * Method: undo + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_Transaction_undoGetForUpdate__J_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jbyteArray jkey, + jint jkey_part_len, jlong jcolumn_family_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* 
column_family_handle = + reinterpret_cast( + jcolumn_family_handle); + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), + jkey_part_len); + txn->UndoGetForUpdate(column_family_handle, key_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); +} + +/* + * Class: org_rocksdb_Transaction + * Method: undoGetForUpdate + * Signature: (J[BI)V + */ +void Java_org_rocksdb_Transaction_undoGetForUpdate__J_3BI(JNIEnv* env, + jobject /*jobj*/, + jlong jhandle, + jbyteArray jkey, + jint jkey_part_len) { + auto* txn = reinterpret_cast(jhandle); + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + if (key == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + ROCKSDB_NAMESPACE::Slice key_slice(reinterpret_cast(key), + jkey_part_len); + txn->UndoGetForUpdate(key_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); +} + +/* + * Class: org_rocksdb_Transaction + * Method: rebuildFromWriteBatch + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_rebuildFromWriteBatch( + JNIEnv* env, jobject /*jobj*/, jlong jhandle, jlong jwrite_batch_handle) { + auto* txn = reinterpret_cast(jhandle); + auto* write_batch = + reinterpret_cast(jwrite_batch_handle); + ROCKSDB_NAMESPACE::Status s = txn->RebuildFromWriteBatch(write_batch); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Transaction + * Method: getCommitTimeWriteBatch + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getCommitTimeWriteBatch(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return GET_CPLUSPLUS_POINTER(txn->GetCommitTimeWriteBatch()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setLogNumber + * Signature: (JJ)V + */ +void Java_org_rocksdb_Transaction_setLogNumber(JNIEnv* /*env*/, + jobject /*jobj*/, jlong jhandle, + jlong jlog_number) { + auto* txn = reinterpret_cast(jhandle); + txn->SetLogNumber(jlog_number); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getLogNumber + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getLogNumber(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return txn->GetLogNumber(); +} + +/* + * Class: org_rocksdb_Transaction + * Method: setName + * Signature: (JLjava/lang/String;)V + */ +void Java_org_rocksdb_Transaction_setName(JNIEnv* env, jobject /*jobj*/, + jlong jhandle, jstring jname) { + auto* txn = reinterpret_cast(jhandle); + const char* name = env->GetStringUTFChars(jname, nullptr); + if (name == nullptr) { + // exception thrown: OutOfMemoryError + return; + } + + ROCKSDB_NAMESPACE::Status s = txn->SetName(name); + + env->ReleaseStringUTFChars(jname, name); + + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Transaction + * Method: getName + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_Transaction_getName(JNIEnv* env, jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + ROCKSDB_NAMESPACE::TransactionName name = txn->GetName(); + return env->NewStringUTF(name.data()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getID + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getID(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + 
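+  // The jlong handle is the raw C++ pointer round-tripped through Java (see
+  // GET_CPLUSPLUS_POINTER); TransactionID itself is a 64-bit integer, so it
+  // is returned to Java by value rather than as a handle.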
ROCKSDB_NAMESPACE::TransactionID id = txn->GetID(); + return static_cast(id); +} + +/* + * Class: org_rocksdb_Transaction + * Method: isDeadlockDetect + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Transaction_isDeadlockDetect(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + return static_cast(txn->IsDeadlockDetect()); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getWaitingTxns + * Signature: (J)Lorg/rocksdb/Transaction/WaitingTransactions; + */ +jobject Java_org_rocksdb_Transaction_getWaitingTxns(JNIEnv* env, + jobject jtransaction_obj, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + uint32_t column_family_id; + std::string key; + std::vector waiting_txns = + txn->GetWaitingTxns(&column_family_id, &key); + jobject jwaiting_txns = + ROCKSDB_NAMESPACE::TransactionJni::newWaitingTransactions( + env, jtransaction_obj, column_family_id, key, waiting_txns); + return jwaiting_txns; +} + +/* + * Class: org_rocksdb_Transaction + * Method: getState + * Signature: (J)B + */ +jbyte Java_org_rocksdb_Transaction_getState(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + ROCKSDB_NAMESPACE::Transaction::TransactionState txn_status = txn->GetState(); + switch (txn_status) { + case ROCKSDB_NAMESPACE::Transaction::TransactionState::STARTED: + return 0x0; + + case ROCKSDB_NAMESPACE::Transaction::TransactionState::AWAITING_PREPARE: + return 0x1; + + case ROCKSDB_NAMESPACE::Transaction::TransactionState::PREPARED: + return 0x2; + + case ROCKSDB_NAMESPACE::Transaction::TransactionState::AWAITING_COMMIT: + return 0x3; + + case ROCKSDB_NAMESPACE::Transaction::TransactionState::COMMITTED: + return 0x4; + + case ROCKSDB_NAMESPACE::Transaction::TransactionState::AWAITING_ROLLBACK: + return 0x5; + + case ROCKSDB_NAMESPACE::Transaction::TransactionState::ROLLEDBACK: + return 0x6; + + case ROCKSDB_NAMESPACE::Transaction::TransactionState::LOCKS_STOLEN: + return 0x7; + } + + assert(false); + return static_cast(-1); +} + +/* + * Class: org_rocksdb_Transaction + * Method: getId + * Signature: (J)J + */ +jlong Java_org_rocksdb_Transaction_getId(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jhandle) { + auto* txn = reinterpret_cast(jhandle); + uint64_t id = txn->GetId(); + return static_cast(id); +} + +/* + * Class: org_rocksdb_Transaction + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_Transaction_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/src/rocksdb/java/rocksjni/transaction_db.cc b/src/rocksdb/java/rocksjni/transaction_db.cc new file mode 100644 index 000000000..0adf85606 --- /dev/null +++ b/src/rocksdb/java/rocksjni/transaction_db.cc @@ -0,0 +1,451 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for ROCKSDB_NAMESPACE::TransactionDB. 
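As orientation for the bridge functions that follow, here is a minimal sketch of the underlying C++ TransactionDB API they forward to; the database path and key/value strings are illustrative only, and error handling is trimmed:

#include <cassert>

#include "rocksdb/options.h"
#include "rocksdb/utilities/transaction.h"
#include "rocksdb/utilities/transaction_db.h"

int main() {
  ROCKSDB_NAMESPACE::Options options;
  options.create_if_missing = true;
  ROCKSDB_NAMESPACE::TransactionDBOptions txn_db_options;
  ROCKSDB_NAMESPACE::TransactionDB* txn_db = nullptr;

  // Open the transactional database (path is illustrative).
  ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::TransactionDB::Open(
      options, txn_db_options, "/tmp/txn_db_example", &txn_db);
  assert(s.ok());

  // Begin, write, and commit a transaction; roll back on failure.
  ROCKSDB_NAMESPACE::Transaction* txn =
      txn_db->BeginTransaction(ROCKSDB_NAMESPACE::WriteOptions());
  s = txn->Put("key", "value");
  s = s.ok() ? txn->Commit() : txn->Rollback();
  assert(s.ok());

  delete txn;  // the Java side mirrors this in disposeInternal()
  delete txn_db;
  return 0;
}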
+ +#include "rocksdb/utilities/transaction_db.h" + +#include + +#include +#include +#include + +#include "include/org_rocksdb_TransactionDB.h" +#include "rocksdb/options.h" +#include "rocksdb/utilities/transaction.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TransactionDB + * Method: open + * Signature: (JJLjava/lang/String;)J + */ +jlong Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2( + JNIEnv* env, jclass, jlong joptions_handle, jlong jtxn_db_options_handle, + jstring jdb_path) { + auto* options = + reinterpret_cast(joptions_handle); + auto* txn_db_options = + reinterpret_cast( + jtxn_db_options_handle); + ROCKSDB_NAMESPACE::TransactionDB* tdb = nullptr; + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::TransactionDB::Open( + *options, *txn_db_options, db_path, &tdb); + env->ReleaseStringUTFChars(jdb_path, db_path); + + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(tdb); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: open + * Signature: (JJLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_TransactionDB_open__JJLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass, jlong jdb_options_handle, jlong jtxn_db_options_handle, + jstring jdb_path, jobjectArray jcolumn_names, + jlongArray jcolumn_options_handles) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + const jsize len_cols = env->GetArrayLength(jcolumn_names); + jlong* jco = env->GetLongArrayElements(jcolumn_options_handles, nullptr); + if (jco == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + std::vector column_families; + for (int i = 0; i < len_cols; i++) { + const jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + const jbyteArray jcn_ba = reinterpret_cast(jcn); + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, nullptr); + if (jcf_name == nullptr) { + // exception thrown: OutOfMemoryError + env->DeleteLocalRef(jcn); + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + const int jcf_name_len = env->GetArrayLength(jcn_ba); + const std::string cf_name(reinterpret_cast(jcf_name), jcf_name_len); + const ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + } + env->ReleaseLongArrayElements(jcolumn_options_handles, jco, JNI_ABORT); + + auto* db_options = + reinterpret_cast(jdb_options_handle); + auto* txn_db_options = + reinterpret_cast( + jtxn_db_options_handle); + std::vector handles; + ROCKSDB_NAMESPACE::TransactionDB* tdb = nullptr; + const ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::TransactionDB::Open( + *db_options, *txn_db_options, db_path, column_families, 
&handles, &tdb); + + // check if open operation was successful + if (s.ok()) { + const jsize resultsLen = 1 + len_cols; // db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = GET_CPLUSPLUS_POINTER(tdb); + for (int i = 1; i <= len_cols; i++) { + results[i] = GET_CPLUSPLUS_POINTER(handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresults); + return nullptr; + } + return jresults; + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return nullptr; + } +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionDB_disposeInternal(JNIEnv*, jobject, + jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + assert(txn_db != nullptr); + delete txn_db; +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: closeDatabase + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionDB_closeDatabase(JNIEnv* env, jclass, + jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + assert(txn_db != nullptr); + ROCKSDB_NAMESPACE::Status s = txn_db->Close(); + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJ( + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + ROCKSDB_NAMESPACE::Transaction* txn = + txn_db->BeginTransaction(*write_options); + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction__JJJ( + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle, + jlong jtxn_options_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* txn_options = reinterpret_cast( + jtxn_options_handle); + ROCKSDB_NAMESPACE::Transaction* txn = + txn_db->BeginTransaction(*write_options, *txn_options); + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction_withOld + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJ( + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle, + jlong jold_txn_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* old_txn = + reinterpret_cast(jold_txn_handle); + ROCKSDB_NAMESPACE::TransactionOptions txn_options; + ROCKSDB_NAMESPACE::Transaction* txn = + txn_db->BeginTransaction(*write_options, txn_options, old_txn); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(txn == old_txn); + + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: beginTransaction_withOld + * Signature: (JJJJ)J + */ +jlong Java_org_rocksdb_TransactionDB_beginTransaction_1withOld__JJJJ( + JNIEnv*, jobject, jlong jhandle, jlong jwrite_options_handle, + jlong 
jtxn_options_handle, jlong jold_txn_handle) { + auto* txn_db = reinterpret_cast(jhandle); + auto* write_options = + reinterpret_cast(jwrite_options_handle); + auto* txn_options = reinterpret_cast( + jtxn_options_handle); + auto* old_txn = + reinterpret_cast(jold_txn_handle); + ROCKSDB_NAMESPACE::Transaction* txn = + txn_db->BeginTransaction(*write_options, *txn_options, old_txn); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(txn == old_txn); + + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: getTransactionByName + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_TransactionDB_getTransactionByName(JNIEnv* env, jobject, + jlong jhandle, + jstring jname) { + auto* txn_db = reinterpret_cast(jhandle); + const char* name = env->GetStringUTFChars(jname, nullptr); + if (name == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + ROCKSDB_NAMESPACE::Transaction* txn = txn_db->GetTransactionByName(name); + env->ReleaseStringUTFChars(jname, name); + return GET_CPLUSPLUS_POINTER(txn); +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: getAllPreparedTransactions + * Signature: (J)[J + */ +jlongArray Java_org_rocksdb_TransactionDB_getAllPreparedTransactions( + JNIEnv* env, jobject, jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + std::vector txns; + txn_db->GetAllPreparedTransactions(&txns); + + const size_t size = txns.size(); + assert(size < UINT32_MAX); // does it fit in a jint? + + const jsize len = static_cast(size); + std::vector tmp(len); + for (jsize i = 0; i < len; ++i) { + tmp[i] = GET_CPLUSPLUS_POINTER(txns[i]); + } + + jlongArray jtxns = env->NewLongArray(len); + if (jtxns == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetLongArrayRegion(jtxns, 0, len, tmp.data()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jtxns); + return nullptr; + } + + return jtxns; +} + +/* + * Class: org_rocksdb_TransactionDB + * Method: getLockStatusData + * Signature: (J)Ljava/util/Map; + */ +jobject Java_org_rocksdb_TransactionDB_getLockStatusData(JNIEnv* env, jobject, + jlong jhandle) { + auto* txn_db = reinterpret_cast(jhandle); + const std::unordered_multimap + lock_status_data = txn_db->GetLockStatusData(); + const jobject jlock_status_data = ROCKSDB_NAMESPACE::HashMapJni::construct( + env, static_cast(lock_status_data.size())); + if (jlock_status_data == nullptr) { + // exception occurred + return nullptr; + } + + const ROCKSDB_NAMESPACE::HashMapJni::FnMapKV< + const int32_t, const ROCKSDB_NAMESPACE::KeyLockInfo, jobject, jobject> + fn_map_kv = + [env](const std::pair& pair) { + const jobject jlong_column_family_id = + ROCKSDB_NAMESPACE::LongJni::valueOf(env, pair.first); + if (jlong_column_family_id == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + const jobject jkey_lock_info = + ROCKSDB_NAMESPACE::KeyLockInfoJni::construct(env, pair.second); + if (jkey_lock_info == nullptr) { + // an error occurred + return std::unique_ptr>(nullptr); + } + return std::unique_ptr>( + new std::pair(jlong_column_family_id, + jkey_lock_info)); + }; + + if (!ROCKSDB_NAMESPACE::HashMapJni::putAll( + env, jlock_status_data, lock_status_data.begin(), + lock_status_data.end(), fn_map_kv)) { + // exception occcurred + return nullptr; + } + + return jlock_status_data; +} + +/* + * Class: org_rocksdb_TransactionDB + * 
Method:    getDeadlockInfoBuffer
+ * Signature: (J)[Lorg/rocksdb/TransactionDB/DeadlockPath;
+ */
+jobjectArray Java_org_rocksdb_TransactionDB_getDeadlockInfoBuffer(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+  const std::vector<ROCKSDB_NAMESPACE::DeadlockPath> deadlock_info_buffer =
+      txn_db->GetDeadlockInfoBuffer();
+
+  const jsize deadlock_info_buffer_len =
+      static_cast<jsize>(deadlock_info_buffer.size());
+  jobjectArray jdeadlock_info_buffer = env->NewObjectArray(
+      deadlock_info_buffer_len,
+      ROCKSDB_NAMESPACE::DeadlockPathJni::getJClass(env), nullptr);
+  if (jdeadlock_info_buffer == nullptr) {
+    // exception thrown: OutOfMemoryError
+    return nullptr;
+  }
+  jsize jdeadlock_info_buffer_offset = 0;
+
+  auto buf_end = deadlock_info_buffer.end();
+  for (auto buf_it = deadlock_info_buffer.begin(); buf_it != buf_end;
+       ++buf_it) {
+    const ROCKSDB_NAMESPACE::DeadlockPath deadlock_path = *buf_it;
+    const std::vector<ROCKSDB_NAMESPACE::DeadlockInfo> deadlock_infos =
+        deadlock_path.path;
+    // size the inner array from this path's own infos
+    const jsize deadlock_infos_len = static_cast<jsize>(deadlock_infos.size());
+    jobjectArray jdeadlock_infos = env->NewObjectArray(
+        deadlock_infos_len, ROCKSDB_NAMESPACE::DeadlockInfoJni::getJClass(env),
+        nullptr);
+    if (jdeadlock_infos == nullptr) {
+      // exception thrown: OutOfMemoryError
+      env->DeleteLocalRef(jdeadlock_info_buffer);
+      return nullptr;
+    }
+    jsize jdeadlock_infos_offset = 0;
+
+    auto infos_end = deadlock_infos.end();
+    for (auto infos_it = deadlock_infos.begin(); infos_it != infos_end;
+         ++infos_it) {
+      const ROCKSDB_NAMESPACE::DeadlockInfo deadlock_info = *infos_it;
+      const jobject jdeadlock_info =
+          ROCKSDB_NAMESPACE::TransactionDBJni::newDeadlockInfo(
+              env, jobj, deadlock_info.m_txn_id, deadlock_info.m_cf_id,
+              deadlock_info.m_waiting_key, deadlock_info.m_exclusive);
+      if (jdeadlock_info == nullptr) {
+        // exception occurred
+        env->DeleteLocalRef(jdeadlock_info_buffer);
+        return nullptr;
+      }
+      env->SetObjectArrayElement(jdeadlock_infos, jdeadlock_infos_offset++,
+                                 jdeadlock_info);
+      if (env->ExceptionCheck()) {
+        // exception thrown: ArrayIndexOutOfBoundsException or
+        // ArrayStoreException
+        env->DeleteLocalRef(jdeadlock_info);
+        env->DeleteLocalRef(jdeadlock_info_buffer);
+        return nullptr;
+      }
+    }
+
+    const jobject jdeadlock_path =
+        ROCKSDB_NAMESPACE::DeadlockPathJni::construct(
+            env, jdeadlock_infos, deadlock_path.limit_exceeded);
+    if (jdeadlock_path == nullptr) {
+      // exception occurred
+      env->DeleteLocalRef(jdeadlock_info_buffer);
+      return nullptr;
+    }
+    env->SetObjectArrayElement(jdeadlock_info_buffer,
+                               jdeadlock_info_buffer_offset++, jdeadlock_path);
+    if (env->ExceptionCheck()) {
+      // exception thrown: ArrayIndexOutOfBoundsException or
+      // ArrayStoreException
+      env->DeleteLocalRef(jdeadlock_path);
+      env->DeleteLocalRef(jdeadlock_info_buffer);
+      return nullptr;
+    }
+  }
+
+  return jdeadlock_info_buffer;
+}
+
+/*
+ * Class:     org_rocksdb_TransactionDB
+ * Method:    setDeadlockInfoBufferSize
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_TransactionDB_setDeadlockInfoBufferSize(
+    JNIEnv*, jobject, jlong jhandle, jint jdeadlock_info_buffer_size) {
+  auto* txn_db = reinterpret_cast<ROCKSDB_NAMESPACE::TransactionDB*>(jhandle);
+  txn_db->SetDeadlockInfoBufferSize(jdeadlock_info_buffer_size);
+}
diff --git a/src/rocksdb/java/rocksjni/transaction_db_options.cc b/src/rocksdb/java/rocksjni/transaction_db_options.cc
new file mode 100644
index 000000000..4cf27121e
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_db_options.cc
@@ -0,0 +1,169 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ +// for ROCKSDB_NAMESPACE::TransactionDBOptions. + +#include + +#include "include/org_rocksdb_TransactionDBOptions.h" +#include "rocksdb/utilities/transaction_db.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: newTransactionDBOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_TransactionDBOptions_newTransactionDBOptions( + JNIEnv* /*env*/, jclass /*jcls*/) { + ROCKSDB_NAMESPACE::TransactionDBOptions* opts = + new ROCKSDB_NAMESPACE::TransactionDBOptions(); + return GET_CPLUSPLUS_POINTER(opts); +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getMaxNumLocks + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getMaxNumLocks(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->max_num_locks; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setMaxNumLocks + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setMaxNumLocks( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jlong jmax_num_locks) { + auto* opts = + reinterpret_cast(jhandle); + opts->max_num_locks = jmax_num_locks; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getNumStripes + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getNumStripes(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->num_stripes; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setNumStripes + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setNumStripes(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jlong jnum_stripes) { + auto* opts = + reinterpret_cast(jhandle); + opts->num_stripes = jnum_stripes; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getTransactionLockTimeout + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getTransactionLockTimeout( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->transaction_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setTransactionLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setTransactionLockTimeout( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jtransaction_lock_timeout) { + auto* opts = + reinterpret_cast(jhandle); + opts->transaction_lock_timeout = jtransaction_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: getDefaultLockTimeout + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionDBOptions_getDefaultLockTimeout( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->default_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setDefaultLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionDBOptions_setDefaultLockTimeout( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jdefault_lock_timeout) { + auto* opts = + reinterpret_cast(jhandle); + opts->default_lock_timeout = jdefault_lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: 
getWritePolicy + * Signature: (J)B + */ +jbyte Java_org_rocksdb_TransactionDBOptions_getWritePolicy(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return ROCKSDB_NAMESPACE::TxnDBWritePolicyJni::toJavaTxnDBWritePolicy( + opts->write_policy); +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: setWritePolicy + * Signature: (JB)V + */ +void Java_org_rocksdb_TransactionDBOptions_setWritePolicy(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jbyte jwrite_policy) { + auto* opts = + reinterpret_cast(jhandle); + opts->write_policy = + ROCKSDB_NAMESPACE::TxnDBWritePolicyJni::toCppTxnDBWritePolicy( + jwrite_policy); +} + +/* + * Class: org_rocksdb_TransactionDBOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionDBOptions_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/src/rocksdb/java/rocksjni/transaction_log.cc b/src/rocksdb/java/rocksjni/transaction_log.cc new file mode 100644 index 000000000..97c3bb301 --- /dev/null +++ b/src/rocksdb/java/rocksjni/transaction_log.cc @@ -0,0 +1,80 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::Iterator methods from Java side. + +#include "rocksdb/transaction_log.h" + +#include +#include +#include + +#include "include/org_rocksdb_TransactionLogIterator.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionLogIterator_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + delete reinterpret_cast(handle); +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: isValid + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_TransactionLogIterator_isValid(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast(handle) + ->Valid(); +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: next + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionLogIterator_next(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast(handle)->Next(); +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: status + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionLogIterator_status(JNIEnv* env, + jobject /*jobj*/, + jlong handle) { + ROCKSDB_NAMESPACE::Status s = + reinterpret_cast(handle) + ->status(); + if (!s.ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: getBatch + * Signature: (J)Lorg/rocksdb/TransactionLogIterator$BatchResult + */ +jobject Java_org_rocksdb_TransactionLogIterator_getBatch(JNIEnv* env, + jobject /*jobj*/, + jlong handle) { + ROCKSDB_NAMESPACE::BatchResult batch_result = + reinterpret_cast(handle) + ->GetBatch(); + return ROCKSDB_NAMESPACE::BatchResultJni::construct(env, batch_result); +} diff --git a/src/rocksdb/java/rocksjni/transaction_notifier.cc b/src/rocksdb/java/rocksjni/transaction_notifier.cc new file mode 100644 index 000000000..cefeb648a --- /dev/null +++ b/src/rocksdb/java/rocksjni/transaction_notifier.cc @@ -0,0 +1,44 @@ +// Copyright 
(c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::TransactionNotifier.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_AbstractTransactionNotifier.h"
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/transaction_notifier_jnicallback.h"
+
+/*
+ * Class: org_rocksdb_AbstractTransactionNotifier
+ * Method: createNewTransactionNotifier
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_AbstractTransactionNotifier_createNewTransactionNotifier(
+    JNIEnv* env, jobject jobj) {
+  auto* transaction_notifier =
+      new ROCKSDB_NAMESPACE::TransactionNotifierJniCallback(env, jobj);
+  auto* sptr_transaction_notifier =
+      new std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>(
+          transaction_notifier);
+  return GET_CPLUSPLUS_POINTER(sptr_transaction_notifier);
+}
+
+/*
+ * Class: org_rocksdb_AbstractTransactionNotifier
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_AbstractTransactionNotifier_disposeInternal(
+    JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) {
+  // TODO(AR) refactor to use JniCallback::JniCallback
+  // when https://github.com/facebook/rocksdb/pull/1241/ is merged
+  std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>* handle =
+      reinterpret_cast<
+          std::shared_ptr<ROCKSDB_NAMESPACE::TransactionNotifierJniCallback>*>(
+          jhandle);
+  delete handle;
+}
diff --git a/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc
new file mode 100644
index 000000000..26761cabd
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TransactionNotifier.
+
+#include "rocksjni/transaction_notifier_jnicallback.h"
+
+#include "rocksjni/cplusplus_to_java_convert.h"
+#include "rocksjni/portal.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+TransactionNotifierJniCallback::TransactionNotifierJniCallback(
+    JNIEnv* env, jobject jtransaction_notifier)
+    : JniCallback(env, jtransaction_notifier) {
+  // we cache the method id for the JNI callback
+  m_jsnapshot_created_methodID =
+      AbstractTransactionNotifierJni::getSnapshotCreatedMethodId(env);
+}
+
+void TransactionNotifierJniCallback::SnapshotCreated(
+    const Snapshot* newSnapshot) {
+  jboolean attached_thread = JNI_FALSE;
+  JNIEnv* env = getJniEnv(&attached_thread);
+  assert(env != nullptr);
+
+  env->CallVoidMethod(m_jcallback_obj, m_jsnapshot_created_methodID,
+                      GET_CPLUSPLUS_POINTER(newSnapshot));
+
+  if (env->ExceptionCheck()) {
+    // exception thrown from CallVoidMethod
+    env->ExceptionDescribe();  // print out exception to stderr
+    releaseJniEnv(attached_thread);
+    return;
+  }
+
+  releaseJniEnv(attached_thread);
+}
+}  // namespace ROCKSDB_NAMESPACE
diff --git a/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h
new file mode 100644
index 000000000..089a5ee4a
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_notifier_jnicallback.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the callback "bridge" between Java and C++ for
+// ROCKSDB_NAMESPACE::TransactionNotifier.
+
+#ifndef JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
+#define JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
+
+#include <jni.h>
+
+#include "rocksdb/utilities/transaction.h"
+#include "rocksjni/jnicallback.h"
+
+namespace ROCKSDB_NAMESPACE {
+
+/**
+ * This class acts as a bridge between C++
+ * and Java. The methods in this class are
+ * called back from the RocksDB TransactionDB or OptimisticTransactionDB (C++);
+ * we then call back to the appropriate Java method.
+ * This enables TransactionNotifier to be implemented in Java.
+ *
+ * Unlike RocksJava's Comparator JNI Callback, we do not attempt
+ * to reduce Java object allocations by caching the Snapshot object
+ * presented to the callback. This could be revisited in the future
+ * if performance is lacking.
+ */
+class TransactionNotifierJniCallback : public JniCallback,
+                                       public TransactionNotifier {
+ public:
+  TransactionNotifierJniCallback(JNIEnv* env, jobject jtransaction_notifier);
+  virtual void SnapshotCreated(const Snapshot* newSnapshot);
+
+ private:
+  jmethodID m_jsnapshot_created_methodID;
+};
+}  // namespace ROCKSDB_NAMESPACE
+
+#endif  // JAVA_ROCKSJNI_TRANSACTION_NOTIFIER_JNICALLBACK_H_
diff --git a/src/rocksdb/java/rocksjni/transaction_options.cc b/src/rocksdb/java/rocksjni/transaction_options.cc
new file mode 100644
index 000000000..dcf363e14
--- /dev/null
+++ b/src/rocksdb/java/rocksjni/transaction_options.cc
@@ -0,0 +1,191 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// This file implements the "bridge" between Java and C++
+// for ROCKSDB_NAMESPACE::TransactionOptions.
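For orientation, a minimal Java-side sketch of what the TransactionDBOptions and TransactionOptions bridges expose, assuming the RocksJava classes they back; the database path and tuning values are invented for illustration:

import org.rocksdb.*;

public class TransactionOptionsSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         // each setter below lands in one of the JNI functions in these files
         final TransactionDBOptions txnDbOptions = new TransactionDBOptions()
             .setMaxNumLocks(10_000)
             .setTransactionLockTimeout(100)  // milliseconds
             .setWritePolicy(TxnDBWritePolicy.WRITE_COMMITTED);
         final TransactionDB txnDb =
             TransactionDB.open(options, txnDbOptions, "/tmp/txn-example");
         final WriteOptions writeOptions = new WriteOptions();
         final TransactionOptions txnOptions = new TransactionOptions()
             .setSetSnapshot(true)
             .setDeadlockDetect(true)
             .setDeadlockDetectDepth(50)) {
      try (final Transaction txn =
               txnDb.beginTransaction(writeOptions, txnOptions)) {
        txn.put("key".getBytes(), "value".getBytes());
        txn.commit();
      }
      // each path wraps the DeadlockInfo objects marshalled by
      // getDeadlockInfoBuffer above; empty unless deadlocks were detected
      for (final TransactionDB.DeadlockPath path :
          txnDb.getDeadlockInfoBuffer()) {
        System.out.println(path);
      }
    }
  }
}

WRITE_COMMITTED is the default write policy; the deadlock buffer is only populated when deadlock detection is enabled on the transaction options.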
+ +#include + +#include "include/org_rocksdb_TransactionOptions.h" +#include "rocksdb/utilities/transaction_db.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_TransactionOptions + * Method: newTransactionOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_TransactionOptions_newTransactionOptions( + JNIEnv* /*env*/, jclass /*jcls*/) { + auto* opts = new ROCKSDB_NAMESPACE::TransactionOptions(); + return GET_CPLUSPLUS_POINTER(opts); +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: isSetSnapshot + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_TransactionOptions_isSetSnapshot(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->set_snapshot; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setSetSnapshot + * Signature: (JZ)V + */ +void Java_org_rocksdb_TransactionOptions_setSetSnapshot( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, jboolean jset_snapshot) { + auto* opts = + reinterpret_cast(jhandle); + opts->set_snapshot = jset_snapshot; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: isDeadlockDetect + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_TransactionOptions_isDeadlockDetect(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->deadlock_detect; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setDeadlockDetect + * Signature: (JZ)V + */ +void Java_org_rocksdb_TransactionOptions_setDeadlockDetect( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jboolean jdeadlock_detect) { + auto* opts = + reinterpret_cast(jhandle); + opts->deadlock_detect = jdeadlock_detect; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: getLockTimeout + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionOptions_getLockTimeout(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->lock_timeout; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setLockTimeout + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionOptions_setLockTimeout(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jlong jlock_timeout) { + auto* opts = + reinterpret_cast(jhandle); + opts->lock_timeout = jlock_timeout; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: getExpiration + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionOptions_getExpiration(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->expiration; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setExpiration + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionOptions_setExpiration(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle, + jlong jexpiration) { + auto* opts = + reinterpret_cast(jhandle); + opts->expiration = jexpiration; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: getDeadlockDetectDepth + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionOptions_getDeadlockDetectDepth( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->deadlock_detect_depth; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setDeadlockDetectDepth + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionOptions_setDeadlockDetectDepth( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jdeadlock_detect_depth) { + auto* opts = + 
reinterpret_cast(jhandle); + opts->deadlock_detect_depth = jdeadlock_detect_depth; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: getMaxWriteBatchSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_TransactionOptions_getMaxWriteBatchSize(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* opts = + reinterpret_cast(jhandle); + return opts->max_write_batch_size; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: setMaxWriteBatchSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_TransactionOptions_setMaxWriteBatchSize( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jhandle, + jlong jmax_write_batch_size) { + auto* opts = + reinterpret_cast(jhandle); + opts->max_write_batch_size = jmax_write_batch_size; +} + +/* + * Class: org_rocksdb_TransactionOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionOptions_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/src/rocksdb/java/rocksjni/ttl.cc b/src/rocksdb/java/rocksjni/ttl.cc new file mode 100644 index 000000000..1fe2083d9 --- /dev/null +++ b/src/rocksdb/java/rocksjni/ttl.cc @@ -0,0 +1,212 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::TtlDB methods. +// from Java side. + +#include +#include +#include + +#include +#include +#include + +#include "include/org_rocksdb_TtlDB.h" +#include "rocksdb/utilities/db_ttl.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TtlDB + * Method: open + * Signature: (JLjava/lang/String;IZ)J + */ +jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, jclass, jlong joptions_handle, + jstring jdb_path, jint jttl, + jboolean jread_only) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + auto* opt = reinterpret_cast(joptions_handle); + ROCKSDB_NAMESPACE::DBWithTTL* db = nullptr; + ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::DBWithTTL::Open(*opt, db_path, &db, jttl, jread_only); + env->ReleaseStringUTFChars(jdb_path, db_path); + + // as TTLDB extends RocksDB on the java side, we can reuse + // the RocksDB portal here. 
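+  // On success, hand the raw DBWithTTL* back to Java as a jlong handle for
+  // the TtlDB object to wrap; on failure, raise a Java RocksDBException from
+  // the Status and return a null (0) handle.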
+ if (s.ok()) { + return GET_CPLUSPLUS_POINTER(db); + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_TtlDB + * Method: openCF + * Signature: (JLjava/lang/String;[[B[J[IZ)[J + */ +jlongArray Java_org_rocksdb_TtlDB_openCF(JNIEnv* env, jclass, jlong jopt_handle, + jstring jdb_path, + jobjectArray jcolumn_names, + jlongArray jcolumn_options, + jintArray jttls, jboolean jread_only) { + const char* db_path = env->GetStringUTFChars(jdb_path, nullptr); + if (db_path == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + + const jsize len_cols = env->GetArrayLength(jcolumn_names); + jlong* jco = env->GetLongArrayElements(jcolumn_options, nullptr); + if (jco == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + std::vector column_families; + jboolean has_exception = JNI_FALSE; + ROCKSDB_NAMESPACE::JniUtil::byteStrings( + env, jcolumn_names, + [](const char* str_data, const size_t str_len) { + return std::string(str_data, str_len); + }, + [&jco, &column_families](size_t idx, std::string cf_name) { + ROCKSDB_NAMESPACE::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[idx]); + column_families.push_back( + ROCKSDB_NAMESPACE::ColumnFamilyDescriptor(cf_name, *cf_options)); + }, + &has_exception); + + env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT); + + if (has_exception == JNI_TRUE) { + // exception occurred + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + + std::vector ttl_values; + jint* jttlv = env->GetIntArrayElements(jttls, nullptr); + if (jttlv == nullptr) { + // exception thrown: OutOfMemoryError + env->ReleaseStringUTFChars(jdb_path, db_path); + return nullptr; + } + const jsize len_ttls = env->GetArrayLength(jttls); + for (jsize i = 0; i < len_ttls; i++) { + ttl_values.push_back(jttlv[i]); + } + env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT); + + auto* opt = reinterpret_cast(jopt_handle); + std::vector handles; + ROCKSDB_NAMESPACE::DBWithTTL* db = nullptr; + ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DBWithTTL::Open( + *opt, db_path, column_families, &handles, &db, ttl_values, jread_only); + + // we have now finished with db_path + env->ReleaseStringUTFChars(jdb_path, db_path); + + // check if open operation was successful + if (s.ok()) { + const jsize resultsLen = 1 + len_cols; // db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = GET_CPLUSPLUS_POINTER(db); + for (int i = 1; i <= len_cols; i++) { + results[i] = GET_CPLUSPLUS_POINTER(handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresults); + return nullptr; + } + + return jresults; + } else { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return NULL; + } +} + +/* + * Class: org_rocksdb_TtlDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TtlDB_disposeInternal(JNIEnv*, jobject, jlong jhandle) { + auto* ttl_db = reinterpret_cast(jhandle); + assert(ttl_db != nullptr); + delete ttl_db; +} + +/* + * Class: org_rocksdb_TtlDB + * Method: closeDatabase + * Signature: (J)V + */ +void Java_org_rocksdb_TtlDB_closeDatabase(JNIEnv* /* 
env */, jclass, + jlong /* jhandle */) { + // auto* ttl_db = reinterpret_cast(jhandle); + // assert(ttl_db != nullptr); + // ROCKSDB_NAMESPACE::Status s = ttl_db->Close(); + // ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + + // TODO(AR) this is disabled until + // https://github.com/facebook/rocksdb/issues/4818 is resolved! +} + +/* + * Class: org_rocksdb_TtlDB + * Method: createColumnFamilyWithTtl + * Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J; + */ +jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl(JNIEnv* env, jobject, + jlong jdb_handle, + jbyteArray jcolumn_name, + jlong jcolumn_options, + jint jttl) { + jbyte* cfname = env->GetByteArrayElements(jcolumn_name, nullptr); + if (cfname == nullptr) { + // exception thrown: OutOfMemoryError + return 0; + } + const jsize len = env->GetArrayLength(jcolumn_name); + + auto* cfOptions = reinterpret_cast( + jcolumn_options); + + auto* db_handle = reinterpret_cast(jdb_handle); + ROCKSDB_NAMESPACE::ColumnFamilyHandle* handle; + ROCKSDB_NAMESPACE::Status s = db_handle->CreateColumnFamilyWithTtl( + *cfOptions, std::string(reinterpret_cast(cfname), len), &handle, + jttl); + + env->ReleaseByteArrayElements(jcolumn_name, cfname, JNI_ABORT); + + if (s.ok()) { + return GET_CPLUSPLUS_POINTER(handle); + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); + return 0; +} diff --git a/src/rocksdb/java/rocksjni/wal_filter.cc b/src/rocksdb/java/rocksjni/wal_filter.cc new file mode 100644 index 000000000..24b88afed --- /dev/null +++ b/src/rocksdb/java/rocksjni/wal_filter.cc @@ -0,0 +1,24 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::WalFilter. + +#include + +#include "include/org_rocksdb_AbstractWalFilter.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/wal_filter_jnicallback.h" + +/* + * Class: org_rocksdb_AbstractWalFilter + * Method: createNewWalFilter + * Signature: ()J + */ +jlong Java_org_rocksdb_AbstractWalFilter_createNewWalFilter(JNIEnv* env, + jobject jobj) { + auto* wal_filter = new ROCKSDB_NAMESPACE::WalFilterJniCallback(env, jobj); + return GET_CPLUSPLUS_POINTER(wal_filter); +} diff --git a/src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc new file mode 100644 index 000000000..d2e3c9076 --- /dev/null +++ b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.cc @@ -0,0 +1,139 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::WalFilter. 
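Since this callback is easiest to follow next to the Java class it drives, here is a sketch of a pass-through filter, assuming the RocksJava AbstractWalFilter/WalFilter types this file calls back into; the class name and (no-op) policy are invented:

import java.util.Map;
import org.rocksdb.AbstractWalFilter;
import org.rocksdb.WalFilter;
import org.rocksdb.WalProcessingOption;
import org.rocksdb.WriteBatch;

public class PassThroughWalFilter extends AbstractWalFilter {
  @Override
  public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
      final Map<String, Integer> cfNameId) {
    // receives the two maps marshalled by ColumnFamilyLogNumberMap below
  }

  @Override
  public WalFilter.LogRecordFoundResult logRecordFound(final long logNumber,
      final String logFileName, final WriteBatch batch,
      final WriteBatch newBatch) {
    // the processing option and batch-changed flag are packed into the
    // jshort that LogRecordFound below unpacks
    return new WalFilter.LogRecordFoundResult(
        WalProcessingOption.CONTINUE_PROCESSING, false /* batchChanged */);
  }

  @Override
  public String name() {
    return "PassThroughWalFilter";
  }
}

An instance would be registered with Options.setWalFilter(...) before opening the database.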
+ +#include "rocksjni/wal_filter_jnicallback.h" + +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +namespace ROCKSDB_NAMESPACE { +WalFilterJniCallback::WalFilterJniCallback(JNIEnv* env, jobject jwal_filter) + : JniCallback(env, jwal_filter) { + // Note: The name of a WalFilter will not change during it's lifetime, + // so we cache it in a global var + jmethodID jname_mid = AbstractWalFilterJni::getNameMethodId(env); + if (jname_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + jstring jname = (jstring)env->CallObjectMethod(m_jcallback_obj, jname_mid); + if (env->ExceptionCheck()) { + // exception thrown + return; + } + jboolean has_exception = JNI_FALSE; + m_name = JniUtil::copyString(env, jname, + &has_exception); // also releases jname + if (has_exception == JNI_TRUE) { + // exception thrown + return; + } + + m_column_family_log_number_map_mid = + AbstractWalFilterJni::getColumnFamilyLogNumberMapMethodId(env); + if (m_column_family_log_number_map_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } + + m_log_record_found_proxy_mid = + AbstractWalFilterJni::getLogRecordFoundProxyMethodId(env); + if (m_log_record_found_proxy_mid == nullptr) { + // exception thrown: NoSuchMethodException or OutOfMemoryError + return; + } +} + +void WalFilterJniCallback::ColumnFamilyLogNumberMap( + const std::map& cf_lognumber_map, + const std::map& cf_name_id_map) { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + if (env == nullptr) { + return; + } + + jobject jcf_lognumber_map = + ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &cf_lognumber_map); + if (jcf_lognumber_map == nullptr) { + // exception occurred + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return; + } + + jobject jcf_name_id_map = + ROCKSDB_NAMESPACE::HashMapJni::fromCppMap(env, &cf_name_id_map); + if (jcf_name_id_map == nullptr) { + // exception occurred + env->ExceptionDescribe(); // print out exception to stderr + env->DeleteLocalRef(jcf_lognumber_map); + releaseJniEnv(attached_thread); + return; + } + + env->CallVoidMethod(m_jcallback_obj, m_column_family_log_number_map_mid, + jcf_lognumber_map, jcf_name_id_map); + + env->DeleteLocalRef(jcf_lognumber_map); + env->DeleteLocalRef(jcf_name_id_map); + + if (env->ExceptionCheck()) { + // exception thrown from CallVoidMethod + env->ExceptionDescribe(); // print out exception to stderr + } + + releaseJniEnv(attached_thread); +} + +WalFilter::WalProcessingOption WalFilterJniCallback::LogRecordFound( + unsigned long long log_number, const std::string& log_file_name, + const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) { + jboolean attached_thread = JNI_FALSE; + JNIEnv* env = getJniEnv(&attached_thread); + if (env == nullptr) { + return WalFilter::WalProcessingOption::kCorruptedRecord; + } + + jstring jlog_file_name = JniUtil::toJavaString(env, &log_file_name); + if (jlog_file_name == nullptr) { + // exception occcurred + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return WalFilter::WalProcessingOption::kCorruptedRecord; + } + + jshort jlog_record_found_result = env->CallShortMethod( + m_jcallback_obj, m_log_record_found_proxy_mid, + static_cast(log_number), jlog_file_name, + GET_CPLUSPLUS_POINTER(&batch), GET_CPLUSPLUS_POINTER(new_batch)); + + env->DeleteLocalRef(jlog_file_name); + + if (env->ExceptionCheck()) { + // 
exception thrown from CallShortMethod + env->ExceptionDescribe(); // print out exception to stderr + releaseJniEnv(attached_thread); + return WalFilter::WalProcessingOption::kCorruptedRecord; + } + + // unpack WalProcessingOption and batch_changed from jlog_record_found_result + jbyte jwal_processing_option_value = (jlog_record_found_result >> 8) & 0xFF; + jbyte jbatch_changed_value = jlog_record_found_result & 0xFF; + + releaseJniEnv(attached_thread); + + *batch_changed = jbatch_changed_value == JNI_TRUE; + + return WalProcessingOptionJni::toCppWalProcessingOption( + jwal_processing_option_value); +} + +const char* WalFilterJniCallback::Name() const { return m_name.get(); } + +} // namespace ROCKSDB_NAMESPACE diff --git a/src/rocksdb/java/rocksjni/wal_filter_jnicallback.h b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.h new file mode 100644 index 000000000..5cdc65978 --- /dev/null +++ b/src/rocksdb/java/rocksjni/wal_filter_jnicallback.h @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::WalFilter. + +#ifndef JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_ +#define JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_ + +#include + +#include +#include +#include + +#include "rocksdb/wal_filter.h" +#include "rocksjni/jnicallback.h" + +namespace ROCKSDB_NAMESPACE { + +class WalFilterJniCallback : public JniCallback, public WalFilter { + public: + WalFilterJniCallback(JNIEnv* env, jobject jwal_filter); + virtual void ColumnFamilyLogNumberMap( + const std::map& cf_lognumber_map, + const std::map& cf_name_id_map); + virtual WalFilter::WalProcessingOption LogRecordFound( + unsigned long long log_number, const std::string& log_file_name, + const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed); + virtual const char* Name() const; + + private: + std::unique_ptr m_name; + jmethodID m_column_family_log_number_map_mid; + jmethodID m_log_record_found_proxy_mid; +}; + +} // namespace ROCKSDB_NAMESPACE + +#endif // JAVA_ROCKSJNI_WAL_FILTER_JNICALLBACK_H_ diff --git a/src/rocksdb/java/rocksjni/write_batch.cc b/src/rocksdb/java/rocksjni/write_batch.cc new file mode 100644 index 000000000..6704e4a7e --- /dev/null +++ b/src/rocksdb/java/rocksjni/write_batch.cc @@ -0,0 +1,676 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::WriteBatch methods from Java side. 
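Before the individual bridge functions, a compact Java-side sketch of the WriteBatch operations they implement; the path, keys and values are invented:

import org.rocksdb.*;

public class WriteBatchSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/wb-example");
         final WriteBatch batch = new WriteBatch();
         final WriteOptions writeOpts = new WriteOptions()) {
      batch.put("k1".getBytes(), "v1".getBytes());    // put__J_3BI_3BI
      batch.merge("k2".getBytes(), "v2".getBytes());  // merge__J_3BI_3BI
      batch.delete("k0".getBytes());                  // delete__J_3BI
      batch.setSavePoint();                           // setSavePoint0
      batch.put("k3".getBytes(), "v3".getBytes());
      batch.rollbackToSavePoint();                    // drops the k3 put
      db.write(writeOpts, batch);                     // applies the rest atomically
    }
  }
}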
+#include "rocksdb/write_batch.h" + +#include + +#include "db/memtable.h" +#include "db/write_batch_internal.h" +#include "include/org_rocksdb_WriteBatch.h" +#include "include/org_rocksdb_WriteBatch_Handler.h" +#include "logging/logging.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/status.h" +#include "rocksdb/write_buffer_manager.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" +#include "rocksjni/writebatchhandlerjnicallback.h" +#include "table/scoped_arena_iterator.h" + +/* + * Class: org_rocksdb_WriteBatch + * Method: newWriteBatch + * Signature: (I)J + */ +jlong Java_org_rocksdb_WriteBatch_newWriteBatch__I(JNIEnv* /*env*/, + jclass /*jcls*/, + jint jreserved_bytes) { + auto* wb = + new ROCKSDB_NAMESPACE::WriteBatch(static_cast(jreserved_bytes)); + return GET_CPLUSPLUS_POINTER(wb); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: newWriteBatch + * Signature: ([BI)J + */ +jlong Java_org_rocksdb_WriteBatch_newWriteBatch___3BI(JNIEnv* env, + jclass /*jcls*/, + jbyteArray jserialized, + jint jserialized_length) { + jboolean has_exception = JNI_FALSE; + std::string serialized = ROCKSDB_NAMESPACE::JniUtil::byteString( + env, jserialized, jserialized_length, + [](const char* str, const size_t len) { return std::string(str, len); }, + &has_exception); + if (has_exception == JNI_TRUE) { + // exception occurred + return 0; + } + + auto* wb = new ROCKSDB_NAMESPACE::WriteBatch(serialized); + return GET_CPLUSPLUS_POINTER(wb); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: count0 + * Signature: (J)I + */ +jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return static_cast(wb->Count()); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: clear0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + wb->Clear(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: setSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_setSavePoint0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + wb->SetSavePoint(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: rollbackToSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_rollbackToSavePoint0(JNIEnv* env, + jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + auto s = wb->RollbackToSavePoint(); + + if (s.ok()) { + return; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: popSavePoint + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_popSavePoint(JNIEnv* env, jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + auto s = wb->PopSavePoint(); + + if (s.ok()) { + return; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: setMaxBytes + * Signature: (JJ)V + */ +void Java_org_rocksdb_WriteBatch_setMaxBytes(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jwb_handle, + jlong jmax_bytes) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + wb->SetMaxBytes(static_cast(jmax_bytes)); +} + +/* + * Class: 
org_rocksdb_WriteBatch + * Method: put + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatch_put__J_3BI_3BI(JNIEnv* env, jobject jobj, + jlong jwb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, + jint jentry_value_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto put = [&wb](ROCKSDB_NAMESPACE::Slice key, + ROCKSDB_NAMESPACE::Slice value) { + return wb->Put(key, value); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: put + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_put__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto put = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key, + ROCKSDB_NAMESPACE::Slice value) { + return wb->Put(cf_handle, key, value); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: putDirect + * Signature: (JLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)V + */ +void Java_org_rocksdb_WriteBatch_putDirect(JNIEnv* env, jobject /*jobj*/, + jlong jwb_handle, jobject jkey, + jint jkey_offset, jint jkey_len, + jobject jval, jint jval_offset, + jint jval_len, jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + auto put = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice& key, + ROCKSDB_NAMESPACE::Slice& value) { + if (cf_handle == nullptr) { + wb->Put(key, value); + } else { + wb->Put(cf_handle, key, value); + } + }; + ROCKSDB_NAMESPACE::JniUtil::kv_op_direct( + put, env, jkey, jkey_offset, jkey_len, jval, jval_offset, jval_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: merge + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto merge = [&wb](ROCKSDB_NAMESPACE::Slice key, + ROCKSDB_NAMESPACE::Slice value) { + return wb->Merge(key, value); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: merge + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto merge = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key, + 
ROCKSDB_NAMESPACE::Slice value) { + return wb->Merge(cf_handle, key, value); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: delete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatch_delete__J_3BI(JNIEnv* env, jobject jobj, + jlong jwb_handle, + jbyteArray jkey, jint jkey_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto remove = [&wb](ROCKSDB_NAMESPACE::Slice key) { return wb->Delete(key); }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: delete + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_delete__J_3BIJ(JNIEnv* env, jobject jobj, + jlong jwb_handle, + jbyteArray jkey, jint jkey_len, + jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto remove = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key) { + return wb->Delete(cf_handle, key); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: singleDelete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatch_singleDelete__J_3BI(JNIEnv* env, jobject jobj, + jlong jwb_handle, + jbyteArray jkey, + jint jkey_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto single_delete = [&wb](ROCKSDB_NAMESPACE::Slice key) { + return wb->SingleDelete(key); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey, + jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: singleDelete + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_singleDelete__J_3BIJ(JNIEnv* env, jobject jobj, + jlong jwb_handle, + jbyteArray jkey, + jint jkey_len, + jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto single_delete = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice key) { + return wb->SingleDelete(cf_handle, key); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey, + jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: deleteDirect + * Signature: (JLjava/nio/ByteBuffer;IIJ)V + */ +void Java_org_rocksdb_WriteBatch_deleteDirect(JNIEnv* env, jobject /*jobj*/, + jlong jwb_handle, jobject jkey, + jint jkey_offset, jint jkey_len, + jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + auto remove = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice& key) { + if (cf_handle == nullptr) { + wb->Delete(key); + } else { + wb->Delete(cf_handle, 
key); + } + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(remove, env, jkey, jkey_offset, + jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: deleteRange + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatch_deleteRange__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jbegin_key, + jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto deleteRange = [&wb](ROCKSDB_NAMESPACE::Slice beginKey, + ROCKSDB_NAMESPACE::Slice endKey) { + return wb->DeleteRange(beginKey, endKey); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, + jbegin_key_len, jend_key, jend_key_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: deleteRange + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_deleteRange__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jbegin_key, + jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len, + jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto deleteRange = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice beginKey, + ROCKSDB_NAMESPACE::Slice endKey) { + return wb->DeleteRange(cf_handle, beginKey, endKey); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, + jbegin_key_len, jend_key, jend_key_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: putLogData + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatch_putLogData(JNIEnv* env, jobject jobj, + jlong jwb_handle, jbyteArray jblob, + jint jblob_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto putLogData = [&wb](ROCKSDB_NAMESPACE::Slice blob) { + return wb->PutLogData(blob); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: iterate + * Signature: (JJ)V + */ +void Java_org_rocksdb_WriteBatch_iterate(JNIEnv* env, jobject /*jobj*/, + jlong jwb_handle, + jlong handlerHandle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + ROCKSDB_NAMESPACE::Status s = wb->Iterate( + reinterpret_cast( + handlerHandle)); + + if (s.ok()) { + return; + } + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: data + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_WriteBatch_data(JNIEnv* env, jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + auto data = wb->Data(); + return ROCKSDB_NAMESPACE::JniUtil::copyBytes(env, data); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: getDataSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_WriteBatch_getDataSize(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + auto data_size = wb->GetDataSize(); + return static_cast(data_size); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasPut + * 
Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteBatch_hasPut(JNIEnv* /*env*/, jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasPut(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasDelete + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteBatch_hasDelete(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasDelete(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasSingleDelete + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_rocksdb_WriteBatch_hasSingleDelete( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasSingleDelete(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasDeleteRange + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_rocksdb_WriteBatch_hasDeleteRange( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasDeleteRange(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasMerge + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_rocksdb_WriteBatch_hasMerge( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasMerge(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasBeginPrepare + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_rocksdb_WriteBatch_hasBeginPrepare( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasBeginPrepare(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasEndPrepare + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_rocksdb_WriteBatch_hasEndPrepare( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasEndPrepare(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasCommit + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_rocksdb_WriteBatch_hasCommit( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasCommit(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: hasRollback + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_rocksdb_WriteBatch_hasRollback( + JNIEnv* /*env*/, jobject /*jobj*/, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return wb->HasRollback(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: markWalTerminationPoint + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_markWalTerminationPoint(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + wb->MarkWalTerminationPoint(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: getWalTerminationPoint + * Signature: (J)Lorg/rocksdb/WriteBatch/SavePoint; + */ +jobject Java_org_rocksdb_WriteBatch_getWalTerminationPoint(JNIEnv* env, + jobject /*jobj*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + auto save_point = wb->GetWalTerminationPoint(); + return ROCKSDB_NAMESPACE::WriteBatchSavePointJni::construct(env, save_point); +} + +/* + * Class: 
org_rocksdb_WriteBatch + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + auto* wb = reinterpret_cast(handle); + assert(wb != nullptr); + delete wb; +} + +/* + * Class: org_rocksdb_WriteBatch_Handler + * Method: createNewHandler0 + * Signature: ()J + */ +jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0(JNIEnv* env, + jobject jobj) { + auto* wbjnic = new ROCKSDB_NAMESPACE::WriteBatchHandlerJniCallback(env, jobj); + return GET_CPLUSPLUS_POINTER(wbjnic); +} diff --git a/src/rocksdb/java/rocksjni/write_batch_test.cc b/src/rocksdb/java/rocksjni/write_batch_test.cc new file mode 100644 index 000000000..30b9a7229 --- /dev/null +++ b/src/rocksdb/java/rocksjni/write_batch_test.cc @@ -0,0 +1,199 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::WriteBatch methods testing from Java side. +#include "rocksdb/write_batch.h" + +#include + +#include "db/memtable.h" +#include "db/write_batch_internal.h" +#include "include/org_rocksdb_WriteBatch.h" +#include "include/org_rocksdb_WriteBatchTest.h" +#include "include/org_rocksdb_WriteBatchTestInternalHelper.h" +#include "include/org_rocksdb_WriteBatch_Handler.h" +#include "options/cf_options.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/status.h" +#include "rocksdb/write_buffer_manager.h" +#include "rocksjni/portal.h" +#include "table/scoped_arena_iterator.h" +#include "test_util/testharness.h" +#include "util/string_util.h" + +/* + * Class: org_rocksdb_WriteBatchTest + * Method: getContents + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(JNIEnv* env, + jclass /*jclazz*/, + jlong jwb_handle) { + auto* b = reinterpret_cast(jwb_handle); + assert(b != nullptr); + + // todo: Currently the following code is directly copied from + // db/write_bench_test.cc. It could be implemented in java once + // all the necessary components can be accessed via jni api. 
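+  // The block below builds a throwaway MemTable, replays the batch into it
+  // via WriteBatchInternal::InsertInto, then walks the memtable's internal
+  // keys, rendering each entry as Type(key[, value])@sequence into 'state',
+  // which is returned to Java as the batch's contents.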
+ + ROCKSDB_NAMESPACE::InternalKeyComparator cmp( + ROCKSDB_NAMESPACE::BytewiseComparator()); + auto factory = std::make_shared(); + ROCKSDB_NAMESPACE::Options options; + ROCKSDB_NAMESPACE::WriteBufferManager wb(options.db_write_buffer_size); + options.memtable_factory = factory; + ROCKSDB_NAMESPACE::MemTable* mem = new ROCKSDB_NAMESPACE::MemTable( + cmp, ROCKSDB_NAMESPACE::ImmutableOptions(options), + ROCKSDB_NAMESPACE::MutableCFOptions(options), &wb, + ROCKSDB_NAMESPACE::kMaxSequenceNumber, 0 /* column_family_id */); + mem->Ref(); + std::string state; + ROCKSDB_NAMESPACE::ColumnFamilyMemTablesDefault cf_mems_default(mem); + ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::WriteBatchInternal::InsertInto(b, &cf_mems_default, + nullptr, nullptr); + unsigned int count = 0; + ROCKSDB_NAMESPACE::Arena arena; + ROCKSDB_NAMESPACE::ScopedArenaIterator iter( + mem->NewIterator(ROCKSDB_NAMESPACE::ReadOptions(), &arena)); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + ROCKSDB_NAMESPACE::ParsedInternalKey ikey; + ikey.clear(); + ROCKSDB_NAMESPACE::Status pik_status = ROCKSDB_NAMESPACE::ParseInternalKey( + iter->key(), &ikey, true /* log_err_key */); + pik_status.PermitUncheckedError(); + assert(pik_status.ok()); + switch (ikey.type) { + case ROCKSDB_NAMESPACE::kTypeValue: + state.append("Put("); + state.append(ikey.user_key.ToString()); + state.append(", "); + state.append(iter->value().ToString()); + state.append(")"); + count++; + break; + case ROCKSDB_NAMESPACE::kTypeMerge: + state.append("Merge("); + state.append(ikey.user_key.ToString()); + state.append(", "); + state.append(iter->value().ToString()); + state.append(")"); + count++; + break; + case ROCKSDB_NAMESPACE::kTypeDeletion: + state.append("Delete("); + state.append(ikey.user_key.ToString()); + state.append(")"); + count++; + break; + case ROCKSDB_NAMESPACE::kTypeSingleDeletion: + state.append("SingleDelete("); + state.append(ikey.user_key.ToString()); + state.append(")"); + count++; + break; + case ROCKSDB_NAMESPACE::kTypeRangeDeletion: + state.append("DeleteRange("); + state.append(ikey.user_key.ToString()); + state.append(", "); + state.append(iter->value().ToString()); + state.append(")"); + count++; + break; + case ROCKSDB_NAMESPACE::kTypeLogData: + state.append("LogData("); + state.append(ikey.user_key.ToString()); + state.append(")"); + count++; + break; + default: + assert(false); + state.append("Err:Expected("); + state.append(std::to_string(ikey.type)); + state.append(")"); + count++; + break; + } + state.append("@"); + state.append(std::to_string(ikey.sequence)); + } + if (!s.ok()) { + state.append(s.ToString()); + } else if (ROCKSDB_NAMESPACE::WriteBatchInternal::Count(b) != count) { + state.append("Err:CountMismatch(expected="); + state.append( + std::to_string(ROCKSDB_NAMESPACE::WriteBatchInternal::Count(b))); + state.append(", actual="); + state.append(std::to_string(count)); + state.append(")"); + } + delete mem->Unref(); + + jbyteArray jstate = env->NewByteArray(static_cast(state.size())); + if (jstate == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + + env->SetByteArrayRegion( + jstate, 0, static_cast(state.size()), + const_cast(reinterpret_cast(state.c_str()))); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jstate); + return nullptr; + } + + return jstate; +} + +/* + * Class: org_rocksdb_WriteBatchTestInternalHelper + * Method: setSequence + * Signature: (JJ)V + */ +void 
Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence( + JNIEnv* /*env*/, jclass /*jclazz*/, jlong jwb_handle, jlong jsn) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + ROCKSDB_NAMESPACE::WriteBatchInternal::SetSequence( + wb, static_cast(jsn)); +} + +/* + * Class: org_rocksdb_WriteBatchTestInternalHelper + * Method: sequence + * Signature: (J)J + */ +jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence(JNIEnv* /*env*/, + jclass /*jclazz*/, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return static_cast( + ROCKSDB_NAMESPACE::WriteBatchInternal::Sequence(wb)); +} + +/* + * Class: org_rocksdb_WriteBatchTestInternalHelper + * Method: append + * Signature: (JJ)V + */ +void Java_org_rocksdb_WriteBatchTestInternalHelper_append(JNIEnv* /*env*/, + jclass /*jclazz*/, + jlong jwb_handle_1, + jlong jwb_handle_2) { + auto* wb1 = reinterpret_cast(jwb_handle_1); + assert(wb1 != nullptr); + auto* wb2 = reinterpret_cast(jwb_handle_2); + assert(wb2 != nullptr); + + ROCKSDB_NAMESPACE::WriteBatchInternal::Append(wb1, wb2); +} diff --git a/src/rocksdb/java/rocksjni/write_batch_with_index.cc b/src/rocksdb/java/rocksjni/write_batch_with_index.cc new file mode 100644 index 000000000..a5c3216cb --- /dev/null +++ b/src/rocksdb/java/rocksjni/write_batch_with_index.cc @@ -0,0 +1,953 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ ROCKSDB_NAMESPACE::WriteBatchWithIndex methods from Java side. 
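As with WriteBatch above, a brief Java-side sketch of the indexed batch this file backs, showing read-your-own-writes before commit; the path and values are invented:

import org.rocksdb.*;

public class WriteBatchWithIndexSketch {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/wbwi-example");
         final WriteBatchWithIndex wbwi =
             new WriteBatchWithIndex(true /* overwriteKey */);
         final ReadOptions readOpts = new ReadOptions();
         final WriteOptions writeOpts = new WriteOptions()) {
      wbwi.put("k".getBytes(), "uncommitted".getBytes());
      // reads through the batch overlaid on the database, before any write
      final byte[] value = wbwi.getFromBatchAndDB(db, readOpts, "k".getBytes());
      System.out.println(new String(value));  // "uncommitted"
      db.write(writeOpts, wbwi);
    }
  }
}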
+ +#include "rocksdb/utilities/write_batch_with_index.h" + +#include "include/org_rocksdb_WBWIRocksIterator.h" +#include "include/org_rocksdb_WriteBatchWithIndex.h" +#include "rocksdb/comparator.h" +#include "rocksjni/cplusplus_to_java_convert.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: newWriteBatchWithIndex + * Signature: ()J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__( + JNIEnv* /*env*/, jclass /*jcls*/) { + auto* wbwi = new ROCKSDB_NAMESPACE::WriteBatchWithIndex(); + return GET_CPLUSPLUS_POINTER(wbwi); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: newWriteBatchWithIndex + * Signature: (Z)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z( + JNIEnv* /*env*/, jclass /*jcls*/, jboolean joverwrite_key) { + auto* wbwi = new ROCKSDB_NAMESPACE::WriteBatchWithIndex( + ROCKSDB_NAMESPACE::BytewiseComparator(), 0, + static_cast(joverwrite_key)); + return GET_CPLUSPLUS_POINTER(wbwi); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: newWriteBatchWithIndex + * Signature: (JBIZ)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JBIZ( + JNIEnv* /*env*/, jclass /*jcls*/, jlong jfallback_index_comparator_handle, + jbyte jcomparator_type, jint jreserved_bytes, jboolean joverwrite_key) { + ROCKSDB_NAMESPACE::Comparator* fallback_comparator = nullptr; + switch (jcomparator_type) { + // JAVA_COMPARATOR + case 0x0: + fallback_comparator = + reinterpret_cast( + jfallback_index_comparator_handle); + break; + + // JAVA_NATIVE_COMPARATOR_WRAPPER + case 0x1: + fallback_comparator = reinterpret_cast( + jfallback_index_comparator_handle); + break; + } + auto* wbwi = new ROCKSDB_NAMESPACE::WriteBatchWithIndex( + fallback_comparator, static_cast(jreserved_bytes), + static_cast(joverwrite_key)); + return GET_CPLUSPLUS_POINTER(wbwi); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: count0 + * Signature: (J)I + */ +jint Java_org_rocksdb_WriteBatchWithIndex_count0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwbwi_handle) { + auto* wbwi = + reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + + return static_cast(wbwi->GetWriteBatch()->Count()); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: put + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { + auto* wbwi = + reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto put = [&wbwi](ROCKSDB_NAMESPACE::Slice key, + ROCKSDB_NAMESPACE::Slice value) { + return wbwi->Put(key, value); + }; + std::unique_ptr status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: put + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, + jlong jcf_handle) { + auto* wbwi = + reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = + reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto put = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key, + ROCKSDB_NAMESPACE::Slice value) { + return 
wbwi->Put(cf_handle, key, value); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: putDirect + * Signature: (JLjava/nio/ByteBuffer;IILjava/nio/ByteBuffer;IIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_putDirect( + JNIEnv* env, jobject /*jobj*/, jlong jwb_handle, jobject jkey, + jint jkey_offset, jint jkey_len, jobject jval, jint jval_offset, + jint jval_len, jlong jcf_handle) { + auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + auto put = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice& key, + ROCKSDB_NAMESPACE::Slice& value) { + if (cf_handle == nullptr) { + wb->Put(key, value); + } else { + wb->Put(cf_handle, key, value); + } + }; + ROCKSDB_NAMESPACE::JniUtil::kv_op_direct( + put, env, jkey, jkey_offset, jkey_len, jval, jval_offset, jval_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: merge + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto merge = [&wbwi](ROCKSDB_NAMESPACE::Slice key, + ROCKSDB_NAMESPACE::Slice value) { + return wbwi->Merge(key, value); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: merge + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, + jlong jcf_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + assert(cf_handle != nullptr); + auto merge = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key, + ROCKSDB_NAMESPACE::Slice value) { + return wbwi->Merge(cf_handle, key, value); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, + jentry_value, jentry_value_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: delete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_delete__J_3BI(JNIEnv* env, + jobject jobj, + jlong jwbwi_handle, + jbyteArray jkey, + jint jkey_len) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto remove = [&wbwi](ROCKSDB_NAMESPACE::Slice key) { + return wbwi->Delete(key); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: delete + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_delete__J_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey,
+ jint jkey_len, jlong jcf_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + assert(cf_handle != nullptr); + auto remove = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key) { + return wbwi->Delete(cf_handle, key); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: singleDelete + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_singleDelete__J_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto single_delete = [&wbwi](ROCKSDB_NAMESPACE::Slice key) { + return wbwi->SingleDelete(key); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey, + jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: singleDelete + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_singleDelete__J_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jlong jcf_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + assert(cf_handle != nullptr); + auto single_delete = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice key) { + return wbwi->SingleDelete(cf_handle, key); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::k_op(single_delete, env, jobj, jkey, + jkey_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: deleteDirect + * Signature: (JLjava/nio/ByteBuffer;IIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_deleteDirect( + JNIEnv* env, jobject /*jobj*/, jlong jwb_handle, jobject jkey, + jint jkey_offset, jint jkey_len, jlong jcf_handle) { + auto* wb = reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + auto remove = [&wb, &cf_handle](ROCKSDB_NAMESPACE::Slice& key) { + if (cf_handle == nullptr) { + wb->Delete(key); + } else { + wb->Delete(cf_handle, key); + } + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(remove, env, jkey, jkey_offset, + jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: deleteRange + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_deleteRange__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jbegin_key, + jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto deleteRange = [&wbwi](ROCKSDB_NAMESPACE::Slice beginKey, + ROCKSDB_NAMESPACE::Slice endKey) { + return wbwi->DeleteRange(beginKey, endKey); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, + jbegin_key_len, jend_key, jend_key_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: deleteRange + * Signature: (J[BI[BIJ)V + */ +void
Java_org_rocksdb_WriteBatchWithIndex_deleteRange__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jbegin_key, + jint jbegin_key_len, jbyteArray jend_key, jint jend_key_len, + jlong jcf_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + assert(cf_handle != nullptr); + auto deleteRange = [&wbwi, &cf_handle](ROCKSDB_NAMESPACE::Slice beginKey, + ROCKSDB_NAMESPACE::Slice endKey) { + return wbwi->DeleteRange(cf_handle, beginKey, endKey); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::kv_op(deleteRange, env, jobj, jbegin_key, + jbegin_key_len, jend_key, jend_key_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: putLogData + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_putLogData(JNIEnv* env, jobject jobj, + jlong jwbwi_handle, + jbyteArray jblob, + jint jblob_len) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + auto putLogData = [&wbwi](ROCKSDB_NAMESPACE::Slice blob) { + return wbwi->PutLogData(blob); + }; + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len); + if (status != nullptr && !status->ok()) { + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, status); + } +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: clear + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_clear0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwbwi_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + + wbwi->Clear(); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: setSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_setSavePoint0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwbwi_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + + wbwi->SetSavePoint(); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: rollbackToSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_rollbackToSavePoint0( + JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + + auto s = wbwi->RollbackToSavePoint(); + + if (s.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: popSavePoint + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_popSavePoint(JNIEnv* env, + jobject /*jobj*/, + jlong jwbwi_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + + auto s = wbwi->PopSavePoint(); + + if (s.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: setMaxBytes + * Signature: (JJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_setMaxBytes(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwbwi_handle, + jlong jmax_bytes) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + + wbwi->SetMaxBytes(static_cast<size_t>(jmax_bytes)); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: getWriteBatch + * Signature: (J)Lorg/rocksdb/WriteBatch; + */ +jobject Java_org_rocksdb_WriteBatchWithIndex_getWriteBatch(JNIEnv* env, + jobject /*jobj*/, + jlong jwbwi_handle) { + auto* wbwi = +
reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + assert(wbwi != nullptr); + + auto* wb = wbwi->GetWriteBatch(); + + // TODO(AR) is the `wb` object owned by us? + return ROCKSDB_NAMESPACE::WriteBatchJni::construct(env, wb); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: iterator0 + * Signature: (J)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwbwi_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + auto* wbwi_iterator = wbwi->NewIterator(); + return GET_CPLUSPLUS_POINTER(wbwi_iterator); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: iterator1 + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jwbwi_handle, + jlong jcf_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + auto* wbwi_iterator = wbwi->NewIterator(cf_handle); + return GET_CPLUSPLUS_POINTER(wbwi_iterator); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: iteratorWithBase + * Signature: (JJJJ)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase( + JNIEnv*, jobject, jlong jwbwi_handle, jlong jcf_handle, + jlong jbase_iterator_handle, jlong jread_opts_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + auto* base_iterator = + reinterpret_cast<ROCKSDB_NAMESPACE::Iterator*>(jbase_iterator_handle); + ROCKSDB_NAMESPACE::ReadOptions* read_opts = + jread_opts_handle == 0 + ? nullptr + : reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>( + jread_opts_handle); + auto* iterator = + wbwi->NewIteratorWithBase(cf_handle, base_iterator, read_opts); + return GET_CPLUSPLUS_POINTER(iterator); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: getFromBatch + * Signature: (JJ[BI)[B + */ +jbyteArray JNICALL Java_org_rocksdb_WriteBatchWithIndex_getFromBatch__JJ_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdbopt_handle, + jbyteArray jkey, jint jkey_len) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + auto* dbopt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdbopt_handle); + + auto getter = [&wbwi, &dbopt](const ROCKSDB_NAMESPACE::Slice& key, + std::string* value) { + return wbwi->GetFromBatch(*dbopt, key, value); + }; + + return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: getFromBatch + * Signature: (JJ[BIJ)[B + */ +jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatch__JJ_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdbopt_handle, + jbyteArray jkey, jint jkey_len, jlong jcf_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + auto* dbopt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jdbopt_handle); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + + auto getter = [&wbwi, &cf_handle, &dbopt](const ROCKSDB_NAMESPACE::Slice& key, + std::string* value) { + return wbwi->GetFromBatch(cf_handle, *dbopt, key, value); + }; + + return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: getFromBatchAndDB + * Signature: (JJJ[BI)[B + */ +jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BI( + JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdb_handle, + jlong jreadopt_handle, jbyteArray jkey, jint jkey_len) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle); + auto* readopt = + reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jreadopt_handle); + + auto getter = [&wbwi, &db, &readopt](const
ROCKSDB_NAMESPACE::Slice& key, + std::string* value) { + return wbwi->GetFromBatchAndDB(db, *readopt, key, value); + }; + + return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: getFromBatchAndDB + * Signature: (JJJ[BIJ)[B + */ +jbyteArray Java_org_rocksdb_WriteBatchWithIndex_getFromBatchAndDB__JJJ_3BIJ( + JNIEnv* env, jobject /*jobj*/, jlong jwbwi_handle, jlong jdb_handle, + jlong jreadopt_handle, jbyteArray jkey, jint jkey_len, jlong jcf_handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(jwbwi_handle); + auto* db = reinterpret_cast<ROCKSDB_NAMESPACE::DB*>(jdb_handle); + auto* readopt = + reinterpret_cast<ROCKSDB_NAMESPACE::ReadOptions*>(jreadopt_handle); + auto* cf_handle = + reinterpret_cast<ROCKSDB_NAMESPACE::ColumnFamilyHandle*>(jcf_handle); + + auto getter = [&wbwi, &db, &cf_handle, &readopt]( + const ROCKSDB_NAMESPACE::Slice& key, std::string* value) { + return wbwi->GetFromBatchAndDB(db, *readopt, cf_handle, key, value); + }; + + return ROCKSDB_NAMESPACE::JniUtil::v_op(getter, env, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + auto* wbwi = + reinterpret_cast<ROCKSDB_NAMESPACE::WriteBatchWithIndex*>(handle); + assert(wbwi != nullptr); + delete wbwi; +} + +/* WBWIRocksIterator below */ + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle); + assert(it != nullptr); + delete it; +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: isValid0 + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WBWIRocksIterator_isValid0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + return reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->Valid(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekToFirst0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekToFirst0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->SeekToFirst(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekToLast0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekToLast0(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong handle) { + reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->SeekToLast(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: next0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_next0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle) { + reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->Next(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: prev0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_prev0(JNIEnv* /*env*/, jobject /*jobj*/, + jlong handle) { + reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle)->Prev(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seek0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seek0(JNIEnv* env, jobject /*jobj*/, + jlong handle, jbyteArray jtarget, + jint jtarget_len) { + auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle); + jbyte* target = new jbyte[jtarget_len]; + env->GetByteArrayRegion(jtarget, 0, jtarget_len, target); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] target; + return; + } + + ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast<char*>(target), + jtarget_len); + + it->Seek(target_slice); + + delete[] target; +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekDirect0 + * Signature:
(JLjava/nio/ByteBuffer;II)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekDirect0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->Seek(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seek, env, jtarget, jtarget_off, + jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. + * + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekByteArray0 + * Signature: (J[BII)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekByteArray0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jtarget, + jint jtarget_off, jint jtarget_len) { + const std::unique_ptr target(new char[jtarget_len]); + if (target == nullptr) { + jclass oom_class = env->FindClass("/lang/java/OutOfMemoryError"); + env->ThrowNew(oom_class, + "Memory allocation failed in RocksDB JNI function"); + return; + } + env->GetByteArrayRegion(jtarget, jtarget_off, jtarget_len, + reinterpret_cast(target.get())); + + ROCKSDB_NAMESPACE::Slice target_slice(target.get(), jtarget_len); + + auto* it = reinterpret_cast(handle); + it->Seek(target_slice); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekForPrev0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekForPrev0(JNIEnv* env, + jobject /*jobj*/, + jlong handle, + jbyteArray jtarget, + jint jtarget_len) { + auto* it = reinterpret_cast(handle); + jbyte* target = new jbyte[jtarget_len]; + env->GetByteArrayRegion(jtarget, 0, jtarget_len, target); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + delete[] target; + return; + } + + ROCKSDB_NAMESPACE::Slice target_slice(reinterpret_cast(target), + jtarget_len); + + it->SeekForPrev(target_slice); + + delete[] target; +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekForPrevDirect0 + * Signature: (JLjava/nio/ByteBuffer;II)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekForPrevDirect0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jobject jtarget, + jint jtarget_off, jint jtarget_len) { + auto* it = reinterpret_cast(handle); + auto seek_for_prev = [&it](ROCKSDB_NAMESPACE::Slice& target_slice) { + it->SeekForPrev(target_slice); + }; + ROCKSDB_NAMESPACE::JniUtil::k_op_direct(seek_for_prev, env, jtarget, + jtarget_off, jtarget_len); +} + +/* + * This method supports fetching into indirect byte buffers; + * the Java wrapper extracts the byte[] and passes it here. 
+ * + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekForPrevByteArray0 + * Signature: (J[BII)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekForPrevByteArray0( + JNIEnv* env, jobject /*jobj*/, jlong handle, jbyteArray jtarget, + jint jtarget_off, jint jtarget_len) { + const std::unique_ptr<char[]> target(new char[jtarget_len]); + if (target == nullptr) { + jclass oom_class = env->FindClass("java/lang/OutOfMemoryError"); + env->ThrowNew(oom_class, + "Memory allocation failed in RocksDB JNI function"); + return; + } + env->GetByteArrayRegion(jtarget, jtarget_off, jtarget_len, + reinterpret_cast<jbyte*>(target.get())); + + ROCKSDB_NAMESPACE::Slice target_slice(target.get(), jtarget_len); + + auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle); + it->SeekForPrev(target_slice); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: status0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_status0(JNIEnv* env, jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle); + ROCKSDB_NAMESPACE::Status s = it->status(); + + if (s.ok()) { + return; + } + + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: entry1 + * Signature: (J)[J + */ +jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1(JNIEnv* env, + jobject /*jobj*/, + jlong handle) { + auto* it = reinterpret_cast<ROCKSDB_NAMESPACE::WBWIIterator*>(handle); + const ROCKSDB_NAMESPACE::WriteEntry& we = it->Entry(); + + jlong results[3]; + + // set the type of the write entry + results[0] = ROCKSDB_NAMESPACE::WriteTypeJni::toJavaWriteType(we.type); + + // NOTE: key_slice and value_slice will be freed by + // org.rocksdb.DirectSlice#close + + auto* key_slice = new ROCKSDB_NAMESPACE::Slice(we.key.data(), we.key.size()); + results[1] = GET_CPLUSPLUS_POINTER(key_slice); + if (we.type == ROCKSDB_NAMESPACE::kDeleteRecord || + we.type == ROCKSDB_NAMESPACE::kSingleDeleteRecord || + we.type == ROCKSDB_NAMESPACE::kLogDataRecord) { + // set native handle of value slice to null if no value available + results[2] = 0; + } else { + auto* value_slice = + new ROCKSDB_NAMESPACE::Slice(we.value.data(), we.value.size()); + results[2] = GET_CPLUSPLUS_POINTER(value_slice); + } + + jlongArray jresults = env->NewLongArray(3); + if (jresults == nullptr) { + // exception thrown: OutOfMemoryError + if (results[2] != 0) { + auto* value_slice = + reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(results[2]); + delete value_slice; + } + delete key_slice; + return nullptr; + } + + env->SetLongArrayRegion(jresults, 0, 3, results); + if (env->ExceptionCheck()) { + // exception thrown: ArrayIndexOutOfBoundsException + env->DeleteLocalRef(jresults); + if (results[2] != 0) { + auto* value_slice = + reinterpret_cast<ROCKSDB_NAMESPACE::Slice*>(results[2]); + delete value_slice; + } + delete key_slice; + return nullptr; + } + + return jresults; +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: refresh0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_refresh0(JNIEnv* env) { + ROCKSDB_NAMESPACE::Status s = + ROCKSDB_NAMESPACE::Status::NotSupported("Refresh() is not supported"); + ROCKSDB_NAMESPACE::RocksDBExceptionJni::ThrowNew(env, s); +} diff --git a/src/rocksdb/java/rocksjni/write_buffer_manager.cc b/src/rocksdb/java/rocksjni/write_buffer_manager.cc new file mode 100644 index 000000000..b5b7d193b --- /dev/null +++ b/src/rocksdb/java/rocksjni/write_buffer_manager.cc @@ -0,0 +1,45 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +#include "rocksdb/write_buffer_manager.h" + +#include <jni.h> + +#include "include/org_rocksdb_WriteBufferManager.h" +#include "rocksdb/cache.h" +#include "rocksjni/cplusplus_to_java_convert.h" + +/* + * Class: org_rocksdb_WriteBufferManager + * Method: newWriteBufferManager + * Signature: (JJZ)J + */ +jlong Java_org_rocksdb_WriteBufferManager_newWriteBufferManager( + JNIEnv* /*env*/, jclass /*jclazz*/, jlong jbuffer_size, jlong jcache_handle, + jboolean allow_stall) { + auto* cache_ptr = + reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::Cache>*>( + jcache_handle); + auto* write_buffer_manager = + new std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>( + std::make_shared<ROCKSDB_NAMESPACE::WriteBufferManager>( + jbuffer_size, *cache_ptr, allow_stall)); + return GET_CPLUSPLUS_POINTER(write_buffer_manager); +} + +/* + * Class: org_rocksdb_WriteBufferManager + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBufferManager_disposeInternal(JNIEnv* /*env*/, + jobject /*jobj*/, + jlong jhandle) { + auto* write_buffer_manager = + reinterpret_cast<std::shared_ptr<ROCKSDB_NAMESPACE::WriteBufferManager>*>( + jhandle); + assert(write_buffer_manager != nullptr); + delete write_buffer_manager; +} diff --git a/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc new file mode 100644 index 000000000..66ceabe9a --- /dev/null +++ b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc @@ -0,0 +1,519 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::WriteBatch::Handler.
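For context, the callbacks implemented below are what allow org.rocksdb.WriteBatch.Handler subclasses written in Java to receive each record of a batch as it is iterated. A minimal, hypothetical sketch of the Java side (class and variable names are illustrative; most callbacks are left as no-op stubs):

    import org.rocksdb.*;

    public class HandlerSketch {
      static { RocksDB.loadLibrary(); }

      // A Handler with no-op defaults; real code overrides what it needs.
      static class PrintingHandler extends WriteBatch.Handler {
        @Override public void put(final int cfId, final byte[] key, final byte[] value) {}
        @Override public void put(final byte[] key, final byte[] value) {
          System.out.println("put " + new String(key) + " = " + new String(value));
        }
        @Override public void merge(final int cfId, final byte[] key, final byte[] value) {}
        @Override public void merge(final byte[] key, final byte[] value) {}
        @Override public void delete(final int cfId, final byte[] key) {}
        @Override public void delete(final byte[] key) {
          System.out.println("delete " + new String(key));
        }
        @Override public void singleDelete(final int cfId, final byte[] key) {}
        @Override public void singleDelete(final byte[] key) {}
        @Override public void deleteRange(final int cfId, final byte[] begin, final byte[] end) {}
        @Override public void deleteRange(final byte[] begin, final byte[] end) {}
        @Override public void logData(final byte[] blob) {}
        @Override public void putBlobIndex(final int cfId, final byte[] key, final byte[] value) {}
        @Override public void markBeginPrepare() {}
        @Override public void markEndPrepare(final byte[] xid) {}
        @Override public void markNoop(final boolean emptyBatch) {}
        @Override public void markRollback(final byte[] xid) {}
        @Override public void markCommit(final byte[] xid) {}
        @Override public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) {}
      }

      public static void main(final String[] args) throws RocksDBException {
        try (final WriteBatch batch = new WriteBatch()) {
          batch.put("k".getBytes(), "v".getBytes());
          batch.delete("old".getBytes());
          // Each record drives the matching native-to-Java callback below
          batch.iterate(new PrintingHandler());
        }
      }
    }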
+ +#include "rocksjni/writebatchhandlerjnicallback.h" + +#include "rocksjni/portal.h" + +namespace ROCKSDB_NAMESPACE { +WriteBatchHandlerJniCallback::WriteBatchHandlerJniCallback( + JNIEnv* env, jobject jWriteBatchHandler) + : JniCallback(env, jWriteBatchHandler), m_env(env) { + m_jPutCfMethodId = WriteBatchHandlerJni::getPutCfMethodId(env); + if (m_jPutCfMethodId == nullptr) { + // exception thrown + return; + } + + m_jPutMethodId = WriteBatchHandlerJni::getPutMethodId(env); + if (m_jPutMethodId == nullptr) { + // exception thrown + return; + } + + m_jMergeCfMethodId = WriteBatchHandlerJni::getMergeCfMethodId(env); + if (m_jMergeCfMethodId == nullptr) { + // exception thrown + return; + } + + m_jMergeMethodId = WriteBatchHandlerJni::getMergeMethodId(env); + if (m_jMergeMethodId == nullptr) { + // exception thrown + return; + } + + m_jDeleteCfMethodId = WriteBatchHandlerJni::getDeleteCfMethodId(env); + if (m_jDeleteCfMethodId == nullptr) { + // exception thrown + return; + } + + m_jDeleteMethodId = WriteBatchHandlerJni::getDeleteMethodId(env); + if (m_jDeleteMethodId == nullptr) { + // exception thrown + return; + } + + m_jSingleDeleteCfMethodId = + WriteBatchHandlerJni::getSingleDeleteCfMethodId(env); + if (m_jSingleDeleteCfMethodId == nullptr) { + // exception thrown + return; + } + + m_jSingleDeleteMethodId = WriteBatchHandlerJni::getSingleDeleteMethodId(env); + if (m_jSingleDeleteMethodId == nullptr) { + // exception thrown + return; + } + + m_jDeleteRangeCfMethodId = + WriteBatchHandlerJni::getDeleteRangeCfMethodId(env); + if (m_jDeleteRangeCfMethodId == nullptr) { + // exception thrown + return; + } + + m_jDeleteRangeMethodId = WriteBatchHandlerJni::getDeleteRangeMethodId(env); + if (m_jDeleteRangeMethodId == nullptr) { + // exception thrown + return; + } + + m_jLogDataMethodId = WriteBatchHandlerJni::getLogDataMethodId(env); + if (m_jLogDataMethodId == nullptr) { + // exception thrown + return; + } + + m_jPutBlobIndexCfMethodId = + WriteBatchHandlerJni::getPutBlobIndexCfMethodId(env); + if (m_jPutBlobIndexCfMethodId == nullptr) { + // exception thrown + return; + } + + m_jMarkBeginPrepareMethodId = + WriteBatchHandlerJni::getMarkBeginPrepareMethodId(env); + if (m_jMarkBeginPrepareMethodId == nullptr) { + // exception thrown + return; + } + + m_jMarkEndPrepareMethodId = + WriteBatchHandlerJni::getMarkEndPrepareMethodId(env); + if (m_jMarkEndPrepareMethodId == nullptr) { + // exception thrown + return; + } + + m_jMarkNoopMethodId = WriteBatchHandlerJni::getMarkNoopMethodId(env); + if (m_jMarkNoopMethodId == nullptr) { + // exception thrown + return; + } + + m_jMarkRollbackMethodId = WriteBatchHandlerJni::getMarkRollbackMethodId(env); + if (m_jMarkRollbackMethodId == nullptr) { + // exception thrown + return; + } + + m_jMarkCommitMethodId = WriteBatchHandlerJni::getMarkCommitMethodId(env); + if (m_jMarkCommitMethodId == nullptr) { + // exception thrown + return; + } + + m_jMarkCommitWithTimestampMethodId = + WriteBatchHandlerJni::getMarkCommitWithTimestampMethodId(env); + if (m_jMarkCommitWithTimestampMethodId == nullptr) { + // exception thrown + return; + } + + m_jContinueMethodId = WriteBatchHandlerJni::getContinueMethodId(env); + if (m_jContinueMethodId == nullptr) { + // exception thrown + return; + } +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::PutCF( + uint32_t column_family_id, const Slice& key, const Slice& value) { + auto put = [this, column_family_id](jbyteArray j_key, jbyteArray j_value) { + m_env->CallVoidMethod(m_jcallback_obj, m_jPutCfMethodId, + 
static_cast<jint>(column_family_id), j_key, j_value); + }; + auto status = WriteBatchHandlerJniCallback::kv_op(key, value, put); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status? + } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +void WriteBatchHandlerJniCallback::Put(const Slice& key, const Slice& value) { + auto put = [this](jbyteArray j_key, jbyteArray j_value) { + m_env->CallVoidMethod(m_jcallback_obj, m_jPutMethodId, j_key, j_value); + }; + WriteBatchHandlerJniCallback::kv_op(key, value, put); +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MergeCF( + uint32_t column_family_id, const Slice& key, const Slice& value) { + auto merge = [this, column_family_id](jbyteArray j_key, jbyteArray j_value) { + m_env->CallVoidMethod(m_jcallback_obj, m_jMergeCfMethodId, + static_cast<jint>(column_family_id), j_key, j_value); + }; + auto status = WriteBatchHandlerJniCallback::kv_op(key, value, merge); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status? + } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +void WriteBatchHandlerJniCallback::Merge(const Slice& key, const Slice& value) { + auto merge = [this](jbyteArray j_key, jbyteArray j_value) { + m_env->CallVoidMethod(m_jcallback_obj, m_jMergeMethodId, j_key, j_value); + }; + WriteBatchHandlerJniCallback::kv_op(key, value, merge); +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::DeleteCF( + uint32_t column_family_id, const Slice& key) { + auto remove = [this, column_family_id](jbyteArray j_key) { + m_env->CallVoidMethod(m_jcallback_obj, m_jDeleteCfMethodId, + static_cast<jint>(column_family_id), j_key); + }; + auto status = WriteBatchHandlerJniCallback::k_op(key, remove); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status? + } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +void WriteBatchHandlerJniCallback::Delete(const Slice& key) { + auto remove = [this](jbyteArray j_key) { + m_env->CallVoidMethod(m_jcallback_obj, m_jDeleteMethodId, j_key); + }; + WriteBatchHandlerJniCallback::k_op(key, remove); +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::SingleDeleteCF( + uint32_t column_family_id, const Slice& key) { + auto singleDelete = [this, column_family_id](jbyteArray j_key) { + m_env->CallVoidMethod(m_jcallback_obj, m_jSingleDeleteCfMethodId, + static_cast<jint>(column_family_id), j_key); + }; + auto status = WriteBatchHandlerJniCallback::k_op(key, singleDelete); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status?
+ } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +void WriteBatchHandlerJniCallback::SingleDelete(const Slice& key) { + auto singleDelete = [this](jbyteArray j_key) { + m_env->CallVoidMethod(m_jcallback_obj, m_jSingleDeleteMethodId, j_key); + }; + WriteBatchHandlerJniCallback::k_op(key, singleDelete); +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::DeleteRangeCF( + uint32_t column_family_id, const Slice& beginKey, const Slice& endKey) { + auto deleteRange = [this, column_family_id](jbyteArray j_beginKey, + jbyteArray j_endKey) { + m_env->CallVoidMethod(m_jcallback_obj, m_jDeleteRangeCfMethodId, + static_cast<jint>(column_family_id), j_beginKey, + j_endKey); + }; + auto status = + WriteBatchHandlerJniCallback::kv_op(beginKey, endKey, deleteRange); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status? + } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +void WriteBatchHandlerJniCallback::DeleteRange(const Slice& beginKey, + const Slice& endKey) { + auto deleteRange = [this](jbyteArray j_beginKey, jbyteArray j_endKey) { + m_env->CallVoidMethod(m_jcallback_obj, m_jDeleteRangeMethodId, j_beginKey, + j_endKey); + }; + WriteBatchHandlerJniCallback::kv_op(beginKey, endKey, deleteRange); +} + +void WriteBatchHandlerJniCallback::LogData(const Slice& blob) { + auto logData = [this](jbyteArray j_blob) { + m_env->CallVoidMethod(m_jcallback_obj, m_jLogDataMethodId, j_blob); + }; + WriteBatchHandlerJniCallback::k_op(blob, logData); +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::PutBlobIndexCF( + uint32_t column_family_id, const Slice& key, const Slice& value) { + auto putBlobIndex = [this, column_family_id](jbyteArray j_key, + jbyteArray j_value) { + m_env->CallVoidMethod(m_jcallback_obj, m_jPutBlobIndexCfMethodId, + static_cast<jint>(column_family_id), j_key, j_value); + }; + auto status = WriteBatchHandlerJniCallback::kv_op(key, value, putBlobIndex); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status?
+ } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkBeginPrepare( + bool unprepare) { +#ifndef DEBUG + (void)unprepare; +#else + assert(!unprepare); +#endif + m_env->CallVoidMethod(m_jcallback_obj, m_jMarkBeginPrepareMethodId); + + // check for Exception, in particular RocksDBException + if (m_env->ExceptionCheck()) { + // exception thrown + jthrowable exception = m_env->ExceptionOccurred(); + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception); + if (status == nullptr) { + // unknown status or exception occurred extracting status + m_env->ExceptionDescribe(); + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) probably need a + // better error code here + + } else { + m_env->ExceptionClear(); // clear the exception, as we have extracted the + // status + return ROCKSDB_NAMESPACE::Status(*status); + } + } + + return ROCKSDB_NAMESPACE::Status::OK(); +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkEndPrepare( + const Slice& xid) { + auto markEndPrepare = [this](jbyteArray j_xid) { + m_env->CallVoidMethod(m_jcallback_obj, m_jMarkEndPrepareMethodId, j_xid); + }; + auto status = WriteBatchHandlerJniCallback::k_op(xid, markEndPrepare); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status? + } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkNoop( + bool empty_batch) { + m_env->CallVoidMethod(m_jcallback_obj, m_jMarkNoopMethodId, + static_cast<jboolean>(empty_batch)); + + // check for Exception, in particular RocksDBException + if (m_env->ExceptionCheck()) { + // exception thrown + jthrowable exception = m_env->ExceptionOccurred(); + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception); + if (status == nullptr) { + // unknown status or exception occurred extracting status + m_env->ExceptionDescribe(); + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) probably need a + // better error code here + + } else { + m_env->ExceptionClear(); // clear the exception, as we have extracted the + // status + return ROCKSDB_NAMESPACE::Status(*status); + } + } + + return ROCKSDB_NAMESPACE::Status::OK(); +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkRollback( + const Slice& xid) { + auto markRollback = [this](jbyteArray j_xid) { + m_env->CallVoidMethod(m_jcallback_obj, m_jMarkRollbackMethodId, j_xid); + }; + auto status = WriteBatchHandlerJniCallback::k_op(xid, markRollback); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status? + } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkCommit( + const Slice& xid) { + auto markCommit = [this](jbyteArray j_xid) { + m_env->CallVoidMethod(m_jcallback_obj, m_jMarkCommitMethodId, j_xid); + }; + auto status = WriteBatchHandlerJniCallback::k_op(xid, markCommit); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status?
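+ // (A nullptr from k_op means the Java callback raised something other + // than a RocksDBException; it has already been reported via + // ExceptionDescribe, so we fall back to Status::OK() here.)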
+ } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +ROCKSDB_NAMESPACE::Status WriteBatchHandlerJniCallback::MarkCommitWithTimestamp( + const Slice& xid, const Slice& ts) { + auto markCommitWithTimestamp = [this](jbyteArray j_xid, jbyteArray j_ts) { + m_env->CallVoidMethod(m_jcallback_obj, m_jMarkCommitWithTimestampMethodId, + j_xid, j_ts); + }; + auto status = + WriteBatchHandlerJniCallback::kv_op(xid, ts, markCommitWithTimestamp); + if (status == nullptr) { + return ROCKSDB_NAMESPACE::Status::OK(); // TODO(AR) what to do if there is + // an Exception but we don't know + // the ROCKSDB_NAMESPACE::Status? + } else { + return ROCKSDB_NAMESPACE::Status(*status); + } +} + +bool WriteBatchHandlerJniCallback::Continue() { + jboolean jContinue = + m_env->CallBooleanMethod(m_jcallback_obj, m_jContinueMethodId); + if (m_env->ExceptionCheck()) { + // exception thrown + m_env->ExceptionDescribe(); + } + + return static_cast<bool>(jContinue == JNI_TRUE); +} + +std::unique_ptr<ROCKSDB_NAMESPACE::Status> WriteBatchHandlerJniCallback::kv_op( + const Slice& key, const Slice& value, + std::function<void(jbyteArray, jbyteArray)> kvFn) { + const jbyteArray j_key = JniUtil::copyBytes(m_env, key); + if (j_key == nullptr) { + // exception thrown + if (m_env->ExceptionCheck()) { + m_env->ExceptionDescribe(); + } + return nullptr; + } + + const jbyteArray j_value = JniUtil::copyBytes(m_env, value); + if (j_value == nullptr) { + // exception thrown + if (m_env->ExceptionCheck()) { + m_env->ExceptionDescribe(); + } + if (j_key != nullptr) { + m_env->DeleteLocalRef(j_key); + } + return nullptr; + } + + kvFn(j_key, j_value); + + // check for Exception, in particular RocksDBException + if (m_env->ExceptionCheck()) { + if (j_value != nullptr) { + m_env->DeleteLocalRef(j_value); + } + if (j_key != nullptr) { + m_env->DeleteLocalRef(j_key); + } + + // exception thrown + jthrowable exception = m_env->ExceptionOccurred(); + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception); + if (status == nullptr) { + // unknown status or exception occurred extracting status + m_env->ExceptionDescribe(); + return nullptr; + + } else { + m_env->ExceptionClear(); // clear the exception, as we have extracted the + // status + return status; + } + } + + if (j_value != nullptr) { + m_env->DeleteLocalRef(j_value); + } + if (j_key != nullptr) { + m_env->DeleteLocalRef(j_key); + } + + // all OK + return std::unique_ptr<ROCKSDB_NAMESPACE::Status>( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::OK())); +} + +std::unique_ptr<ROCKSDB_NAMESPACE::Status> WriteBatchHandlerJniCallback::k_op( + const Slice& key, std::function<void(jbyteArray)> kFn) { + const jbyteArray j_key = JniUtil::copyBytes(m_env, key); + if (j_key == nullptr) { + // exception thrown + if (m_env->ExceptionCheck()) { + m_env->ExceptionDescribe(); + } + return nullptr; + } + + kFn(j_key); + + // check for Exception, in particular RocksDBException + if (m_env->ExceptionCheck()) { + if (j_key != nullptr) { + m_env->DeleteLocalRef(j_key); + } + + // exception thrown + jthrowable exception = m_env->ExceptionOccurred(); + std::unique_ptr<ROCKSDB_NAMESPACE::Status> status = + ROCKSDB_NAMESPACE::RocksDBExceptionJni::toCppStatus(m_env, exception); + if (status == nullptr) { + // unknown status or exception occurred extracting status + m_env->ExceptionDescribe(); + return nullptr; + + } else { + m_env->ExceptionClear(); // clear the exception, as we have extracted the + // status + return status; + } + } + + if (j_key != nullptr) { + m_env->DeleteLocalRef(j_key); + } + + // all OK + return std::unique_ptr<ROCKSDB_NAMESPACE::Status>( + new ROCKSDB_NAMESPACE::Status(ROCKSDB_NAMESPACE::Status::OK())); +} +} // 
namespace ROCKSDB_NAMESPACE diff --git a/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h new file mode 100644 index 000000000..9629797ca --- /dev/null +++ b/src/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h @@ -0,0 +1,92 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// This file implements the callback "bridge" between Java and C++ for +// ROCKSDB_NAMESPACE::WriteBatch::Handler. + +#ifndef JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_ +#define JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_ + +#include <jni.h> + +#include <functional> +#include <memory> + +#include "rocksdb/write_batch.h" +#include "rocksjni/jnicallback.h" + +namespace ROCKSDB_NAMESPACE { +/** + * This class acts as a bridge between C++ + * and Java. The methods in this class will be + * called back from the RocksDB storage engine (C++), + * which calls the appropriate Java method. + * This enables Write Batch Handlers to be implemented in Java. + */ +class WriteBatchHandlerJniCallback : public JniCallback, + public WriteBatch::Handler { + public: + WriteBatchHandlerJniCallback(JNIEnv* env, jobject jWriteBatchHandler); + Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& value); + void Put(const Slice& key, const Slice& value); + Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value); + void Merge(const Slice& key, const Slice& value); + Status DeleteCF(uint32_t column_family_id, const Slice& key); + void Delete(const Slice& key); + Status SingleDeleteCF(uint32_t column_family_id, const Slice& key); + void SingleDelete(const Slice& key); + Status DeleteRangeCF(uint32_t column_family_id, const Slice& beginKey, + const Slice& endKey); + void DeleteRange(const Slice& beginKey, const Slice& endKey); + void LogData(const Slice& blob); + Status PutBlobIndexCF(uint32_t column_family_id, const Slice& key, + const Slice& value); + Status MarkBeginPrepare(bool); + Status MarkEndPrepare(const Slice& xid); + Status MarkNoop(bool empty_batch); + Status MarkRollback(const Slice& xid); + Status MarkCommit(const Slice& xid); + Status MarkCommitWithTimestamp(const Slice& xid, const Slice& commit_ts); + bool Continue(); + + private: + JNIEnv* m_env; + jmethodID m_jPutCfMethodId; + jmethodID m_jPutMethodId; + jmethodID m_jMergeCfMethodId; + jmethodID m_jMergeMethodId; + jmethodID m_jDeleteCfMethodId; + jmethodID m_jDeleteMethodId; + jmethodID m_jSingleDeleteCfMethodId; + jmethodID m_jSingleDeleteMethodId; + jmethodID m_jDeleteRangeCfMethodId; + jmethodID m_jDeleteRangeMethodId; + jmethodID m_jLogDataMethodId; + jmethodID m_jPutBlobIndexCfMethodId; + jmethodID m_jMarkBeginPrepareMethodId; + jmethodID m_jMarkEndPrepareMethodId; + jmethodID m_jMarkNoopMethodId; + jmethodID m_jMarkRollbackMethodId; + jmethodID m_jMarkCommitMethodId; + jmethodID m_jMarkCommitWithTimestampMethodId; + jmethodID m_jContinueMethodId; + /** + * @return A pointer to a ROCKSDB_NAMESPACE::Status or nullptr if an + * unexpected exception occurred + */ + std::unique_ptr<ROCKSDB_NAMESPACE::Status> kv_op( + const Slice& key, const Slice& value, + std::function<void(jbyteArray, jbyteArray)> kvFn); + /** + * @return A pointer to a ROCKSDB_NAMESPACE::Status or nullptr if an + * unexpected exception occurred + */ + std::unique_ptr<ROCKSDB_NAMESPACE::Status> k_op( + const Slice& key, std::function<void(jbyteArray)> kFn); +}; +} // namespace ROCKSDB_NAMESPACE + +#endif // 
JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_ diff --git a/src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java b/src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java new file mode 100644 index 000000000..7e7a22e94 --- /dev/null +++ b/src/rocksdb/java/samples/src/main/java/OptimisticTransactionSample.java @@ -0,0 +1,184 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +import org.rocksdb.*; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Demonstrates using Transactions on an OptimisticTransactionDB with + * varying isolation guarantees. + */ +public class OptimisticTransactionSample { + private static final String dbPath = "/tmp/rocksdb_optimistic_transaction_example"; + + public static void main(final String[] args) throws RocksDBException { + + try (final Options options = new Options() + .setCreateIfMissing(true); + final OptimisticTransactionDB txnDb = + OptimisticTransactionDB.open(options, dbPath)) { + + try (final WriteOptions writeOptions = new WriteOptions(); + final ReadOptions readOptions = new ReadOptions()) { + + //////////////////////////////////////////////////////// + // + // Simple OptimisticTransaction Example ("Read Committed") + // + //////////////////////////////////////////////////////// + readCommitted(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Repeatable Read" (Snapshot Isolation) Example + // -- Using a single Snapshot + // + //////////////////////////////////////////////////////// + repeatableRead(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Read Committed" (Monotonic Atomic Views) Example + // -- Using multiple Snapshots + // + //////////////////////////////////////////////////////// + readCommitted_monotonicAtomicViews(txnDb, writeOptions, readOptions); + } + } + } + + /** + * Demonstrates "Read Committed" isolation. + */ + private static void readCommitted(final OptimisticTransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + final byte[] key1 = "abc".getBytes(UTF_8); + final byte[] value1 = "def".getBytes(UTF_8); + + final byte[] key2 = "xyz".getBytes(UTF_8); + final byte[] value2 = "zzz".getBytes(UTF_8); + + // Start a transaction + try (final Transaction txn = txnDb.beginTransaction(writeOptions)) { + // Read a key in this transaction + byte[] value = txn.get(readOptions, key1); + assert(value == null); + + // Write a key in this transaction + txn.put(key1, value1); + + // Read a key OUTSIDE this transaction. Does not affect txn. + value = txnDb.get(readOptions, key1); + assert(value == null); + + // Write a key OUTSIDE of this transaction. + // Does not affect txn since this is an unrelated key. + // If we wrote key 'abc' here, the transaction would fail to commit.
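+ // (Optimistic transactions take no locks, so such a conflict is only + // detected at commit() time, when the transaction validates the keys + // it has read and written.)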
+ txnDb.put(writeOptions, key2, value2); + + // Commit transaction + txn.commit(); + } + } + + /** + * Demonstrates "Repeatable Read" (Snapshot Isolation). + */ + private static void repeatableRead(final OptimisticTransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + + final byte[] key1 = "ghi".getBytes(UTF_8); + final byte[] value1 = "jkl".getBytes(UTF_8); + + // Set a snapshot at the start of the transaction by calling setSnapshot(true) + try (final OptimisticTransactionOptions txnOptions = + new OptimisticTransactionOptions().setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + final Snapshot snapshot = txn.getSnapshot(); + + // Write a key OUTSIDE of transaction + txnDb.put(writeOptions, key1, value1); + + // Read a key using the snapshot. + readOptions.setSnapshot(snapshot); + final byte[] value = txn.getForUpdate(readOptions, key1, true); + assert (value == null); + + try { + // Attempt to commit transaction + txn.commit(); + throw new IllegalStateException(); + } catch (final RocksDBException e) { + // Transaction could not commit since the write outside of the txn + // conflicted with the read! + assert(e.getStatus().getCode() == Status.Code.Busy); + } + + txn.rollback(); + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } + + /** + * Demonstrates "Read Committed" (Monotonic Atomic Views) isolation. + * + * In this example, we set the snapshot multiple times. This is probably + * only necessary if you have very strict isolation requirements to + * implement. + */ + private static void readCommitted_monotonicAtomicViews( + final OptimisticTransactionDB txnDb, final WriteOptions writeOptions, + final ReadOptions readOptions) throws RocksDBException { + + final byte[] keyX = "x".getBytes(UTF_8); + final byte[] valueX = "x".getBytes(UTF_8); + + final byte[] keyY = "y".getBytes(UTF_8); + final byte[] valueY = "y".getBytes(UTF_8); + + try (final OptimisticTransactionOptions txnOptions = + new OptimisticTransactionOptions().setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + // Do some reads and writes to key "x" + Snapshot snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + byte[] value = txn.get(readOptions, keyX); + txn.put(keyX, valueX); + + // Do a write outside of the transaction to key "y" + txnDb.put(writeOptions, keyY, valueY); + + // Set a new snapshot in the transaction + txn.setSnapshot(); + snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + + // Do some reads and writes to key "y" + // Since the snapshot was advanced, the write done outside of the + // transaction does not conflict. + value = txn.getForUpdate(readOptions, keyY, true); + txn.put(keyY, valueY); + + // Commit. Since the snapshot was advanced, the write done outside of the + // transaction does not prevent this transaction from committing. + txn.commit(); + + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } +} diff --git a/src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java b/src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java new file mode 100644 index 000000000..72f5731a1 --- /dev/null +++ b/src/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java @@ -0,0 +1,78 @@ +// Copyright (c) 2011-present, Facebook, Inc.
All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +import org.rocksdb.*; + +import java.util.ArrayList; +import java.util.List; + +public class RocksDBColumnFamilySample { + static { + RocksDB.loadLibrary(); + } + + public static void main(final String[] args) throws RocksDBException { + if (args.length < 1) { + System.out.println( + "usage: RocksDBColumnFamilySample db_path"); + System.exit(-1); + } + + final String db_path = args[0]; + + System.out.println("RocksDBColumnFamilySample"); + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, db_path)) { + + assert(db != null); + + // create column family + try (final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes(), + new ColumnFamilyOptions()))) { + assert (columnFamilyHandle != null); + } + } + + // open DB with two column families + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + new ArrayList<>(); + // have to open default column family + columnFamilyDescriptors.add(new ColumnFamilyDescriptor( + RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyOptions())); + // open the new one, too + columnFamilyDescriptors.add(new ColumnFamilyDescriptor( + "new_cf".getBytes(), new ColumnFamilyOptions())); + final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); + try (final DBOptions options = new DBOptions(); + final RocksDB db = RocksDB.open(options, db_path, + columnFamilyDescriptors, columnFamilyHandles)) { + assert(db != null); + + try { + // put and get from non-default column family + db.put( + columnFamilyHandles.get(1), new WriteOptions(), "key".getBytes(), "value".getBytes()); + + // atomic write + try (final WriteBatch wb = new WriteBatch()) { + wb.put(columnFamilyHandles.get(0), "key2".getBytes(), + "value2".getBytes()); + wb.put(columnFamilyHandles.get(1), "key3".getBytes(), + "value3".getBytes()); + wb.delete(columnFamilyHandles.get(1), "key".getBytes()); + db.write(new WriteOptions(), wb); + } + + // drop column family + db.dropColumnFamily(columnFamilyHandles.get(1)); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } +} diff --git a/src/rocksdb/java/samples/src/main/java/RocksDBSample.java b/src/rocksdb/java/samples/src/main/java/RocksDBSample.java new file mode 100644 index 000000000..ea650b141 --- /dev/null +++ b/src/rocksdb/java/samples/src/main/java/RocksDBSample.java @@ -0,0 +1,296 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
+ +import java.lang.IllegalArgumentException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; + +import org.rocksdb.*; +import org.rocksdb.util.SizeUnit; + +public class RocksDBSample { + static { + RocksDB.loadLibrary(); + } + + public static void main(final String[] args) { + if (args.length < 1) { + System.out.println("usage: RocksDBSample db_path"); + System.exit(-1); + } + + final String db_path = args[0]; + final String db_path_not_found = db_path + "_not_found"; + + System.out.println("RocksDBSample"); + try (final Options options = new Options(); + final Filter bloomFilter = new BloomFilter(10); + final ReadOptions readOptions = new ReadOptions() + .setFillCache(false); + final Statistics stats = new Statistics(); + final RateLimiter rateLimiter = new RateLimiter(10000000,10000, 10)) { + + try (final RocksDB db = RocksDB.open(options, db_path_not_found)) { + assert (false); + } catch (final RocksDBException e) { + System.out.format("Caught the expected exception -- %s\n", e); + } + + try { + options.setCreateIfMissing(true) + .setStatistics(stats) + .setWriteBufferSize(8 * SizeUnit.KB) + .setMaxWriteBufferNumber(3) + .setMaxBackgroundJobs(10) + .setCompressionType(CompressionType.ZLIB_COMPRESSION) + .setCompactionStyle(CompactionStyle.UNIVERSAL); + } catch (final IllegalArgumentException e) { + assert (false); + } + + assert (options.createIfMissing() == true); + assert (options.writeBufferSize() == 8 * SizeUnit.KB); + assert (options.maxWriteBufferNumber() == 3); + assert (options.maxBackgroundJobs() == 10); + assert (options.compressionType() == CompressionType.ZLIB_COMPRESSION); + assert (options.compactionStyle() == CompactionStyle.UNIVERSAL); + + assert (options.memTableFactoryName().equals("SkipListFactory")); + options.setMemTableConfig( + new HashSkipListMemTableConfig() + .setHeight(4) + .setBranchingFactor(4) + .setBucketCount(2000000)); + assert (options.memTableFactoryName().equals("HashSkipListRepFactory")); + + options.setMemTableConfig( + new HashLinkedListMemTableConfig() + .setBucketCount(100000)); + assert (options.memTableFactoryName().equals("HashLinkedListRepFactory")); + + options.setMemTableConfig( + new VectorMemTableConfig().setReservedSize(10000)); + assert (options.memTableFactoryName().equals("VectorRepFactory")); + + options.setMemTableConfig(new SkipListMemTableConfig()); + assert (options.memTableFactoryName().equals("SkipListFactory")); + + options.setTableFormatConfig(new PlainTableConfig()); + // Plain-Table requires mmap read + options.setAllowMmapReads(true); + assert (options.tableFactoryName().equals("PlainTable")); + + options.setRateLimiter(rateLimiter); + + final BlockBasedTableConfig table_options = new BlockBasedTableConfig(); + Cache cache = new LRUCache(64 * 1024, 6); + table_options.setBlockCache(cache) + .setFilterPolicy(bloomFilter) + .setBlockSizeDeviation(5) + .setBlockRestartInterval(10) + .setCacheIndexAndFilterBlocks(true) + .setBlockCacheCompressed(new LRUCache(64 * 1000, 10)); + + assert (table_options.blockSizeDeviation() == 5); + assert (table_options.blockRestartInterval() == 10); + assert (table_options.cacheIndexAndFilterBlocks() == true); + + options.setTableFormatConfig(table_options); + assert (options.tableFactoryName().equals("BlockBasedTable")); + + try (final RocksDB db = RocksDB.open(options, db_path)) { + db.put("hello".getBytes(), "world".getBytes()); + + final byte[] value = db.get("hello".getBytes()); + assert ("world".equals(new String(value))); + + final 
String str = db.getProperty("rocksdb.stats");
+        assert (str != null && !str.equals(""));
+      } catch (final RocksDBException e) {
+        System.out.format("[ERROR] caught the unexpected exception -- %s\n", e);
+        assert (false);
+      }
+
+      try (final RocksDB db = RocksDB.open(options, db_path)) {
+        db.put("hello".getBytes(), "world".getBytes());
+        byte[] value = db.get("hello".getBytes());
+        System.out.format("Get('hello') = %s\n",
+            new String(value));
+
+        for (int i = 1; i <= 9; ++i) {
+          for (int j = 1; j <= 9; ++j) {
+            db.put(String.format("%dx%d", i, j).getBytes(),
+                String.format("%d", i * j).getBytes());
+          }
+        }
+
+        for (int i = 1; i <= 9; ++i) {
+          for (int j = 1; j <= 9; ++j) {
+            System.out.format("%s ", new String(db.get(
+                String.format("%dx%d", i, j).getBytes())));
+          }
+          System.out.println("");
+        }
+
+        // write batch test
+        try (final WriteOptions writeOpt = new WriteOptions()) {
+          for (int i = 10; i <= 19; ++i) {
+            try (final WriteBatch batch = new WriteBatch()) {
+              for (int j = 10; j <= 19; ++j) {
+                batch.put(String.format("%dx%d", i, j).getBytes(),
+                    String.format("%d", i * j).getBytes());
+              }
+              db.write(writeOpt, batch);
+            }
+          }
+        }
+        for (int i = 10; i <= 19; ++i) {
+          for (int j = 10; j <= 19; ++j) {
+            assert (new String(
+                db.get(String.format("%dx%d", i, j).getBytes())).equals(
+                String.format("%d", i * j)));
+            System.out.format("%s ", new String(db.get(
+                String.format("%dx%d", i, j).getBytes())));
+          }
+          System.out.println("");
+        }
+
+        value = db.get("1x1".getBytes());
+        assert (value != null);
+        value = db.get("world".getBytes());
+        assert (value == null);
+        value = db.get(readOptions, "world".getBytes());
+        assert (value == null);
+
+        final byte[] testKey = "asdf".getBytes();
+        final byte[] testValue =
+            "asdfghjkl;'?><MNBVCXZAQWERTYUIOP{+_)(*&^%$#@".getBytes();
+        db.put(testKey, testValue);
+        byte[] testResult = db.get(testKey);
+        assert (testResult != null);
+        assert (Arrays.equals(testValue, testResult));
+        assert (new String(testValue).equals(new String(testResult)));
+        testResult = db.get(readOptions, testKey);
+        assert (testResult != null);
+        assert (Arrays.equals(testValue, testResult));
+        assert (new String(testValue).equals(new String(testResult)));
+
+        final byte[] insufficientArray = new byte[10];
+        final byte[] enoughArray = new byte[50];
+        int len;
+        len = db.get(testKey, insufficientArray);
+        assert (len > insufficientArray.length);
+        len = db.get("asdfjkl;".getBytes(), enoughArray);
+        assert (len == RocksDB.NOT_FOUND);
+        len = db.get(testKey, enoughArray);
+        assert (len == testValue.length);
+
+        len = db.get(readOptions, testKey, insufficientArray);
+        assert (len > insufficientArray.length);
+        len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray);
+        assert (len == RocksDB.NOT_FOUND);
+        len = db.get(readOptions, testKey, enoughArray);
+        assert (len == testValue.length);
+
+        db.delete(testKey);
+        len = db.get(testKey, enoughArray);
+        assert (len == RocksDB.NOT_FOUND);
+
+        // repeat the test with WriteOptions
+        try (final WriteOptions writeOpts = new WriteOptions()) {
+          writeOpts.setSync(true);
+          writeOpts.setDisableWAL(false);
+          db.put(writeOpts, testKey, testValue);
+          len = db.get(testKey, enoughArray);
+          assert (len == testValue.length);
+          assert (new String(testValue).equals(
+              new String(enoughArray, 0, len)));
+        }
+
+        try {
+          for (final TickerType statsType : TickerType.values()) {
+            if (statsType != TickerType.TICKER_ENUM_MAX) {
+              stats.getTickerCount(statsType);
+            }
+          }
+          System.out.println("getTickerCount() passed.");
+        } catch (final Exception e) {
+          System.out.println("Failed in call to getTickerCount()");
+          assert (false); //Should never reach here.
+        }
+
+        try {
+          for (final HistogramType histogramType : HistogramType.values()) {
+            if (histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
+              HistogramData data = stats.getHistogramData(histogramType);
+            }
+          }
+          System.out.println("getHistogramData() passed.");
+        } catch (final Exception e) {
+          System.out.println("Failed in call to getHistogramData()");
+          assert (false); //Should never reach here.
+        }
+
+        try (final RocksIterator iterator = db.newIterator()) {
+
+          boolean seekToFirstPassed = false;
+          for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) {
+            iterator.status();
+            assert (iterator.key() != null);
+            assert (iterator.value() != null);
+            seekToFirstPassed = true;
+          }
+          if (seekToFirstPassed) {
+            System.out.println("iterator seekToFirst tests passed.");
+          }
+
+          boolean seekToLastPassed = false;
+          for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
+            iterator.status();
+            assert (iterator.key() != null);
+            assert (iterator.value() != null);
+            seekToLastPassed = true;
+          }
+
+          if (seekToLastPassed) {
+            System.out.println("iterator seekToLastPassed tests passed.");
+          }
+
+          iterator.seekToFirst();
+          iterator.seek(iterator.key());
+          assert (iterator.key() != null);
+          assert (iterator.value() != null);
+
+          System.out.println("iterator seek test passed.");
+
+        }
+        System.out.println("iterator tests passed.");
+
+        final List<byte[]> keys = new ArrayList<>();
+        try (final RocksIterator iterator = db.newIterator()) {
+          for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) {
+            keys.add(iterator.key());
+          }
+        }
+
+        List<byte[]> values = db.multiGetAsList(keys);
+        assert (values.size() == keys.size());
+        for (final byte[] value1 : values) {
+          assert (value1 != null);
+        }
+
+        values = db.multiGetAsList(new ReadOptions(), keys);
+        assert (values.size() == keys.size());
+        for (final byte[] value1 : values) {
+          assert (value1 != null);
+        }
+      } catch (final RocksDBException e) {
+        System.err.println(e);
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/java/samples/src/main/java/TransactionSample.java b/src/rocksdb/java/samples/src/main/java/TransactionSample.java
new file mode 100644
index 000000000..b88a68f12
--- /dev/null
+++ b/src/rocksdb/java/samples/src/main/java/TransactionSample.java
@@ -0,0 +1,183 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
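A note on the multiGetAsList calls at the end of the sample above: the returned list always has exactly one entry per requested key, and a null entry signals that the key was not found. A minimal sketch of the checking pattern (an editorial illustration; it assumes an already-open RocksDB handle passed in as db):

import java.util.List;

import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public final class MultiGetSketch {
  // prints each key with its value, or <absent> when the key is not found
  static void printAll(final RocksDB db, final List<byte[]> keys)
      throws RocksDBException {
    final List<byte[]> values = db.multiGetAsList(keys);
    for (int i = 0; i < keys.size(); i++) {
      final byte[] value = values.get(i);
      System.out.println(new String(keys.get(i)) + " -> "
          + (value == null ? "<absent>" : new String(value)));
    }
  }
}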
+ +import org.rocksdb.*; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Demonstrates using Transactions on a TransactionDB with + * varying isolation guarantees + */ +public class TransactionSample { + private static final String dbPath = "/tmp/rocksdb_transaction_example"; + + public static final void main(final String args[]) throws RocksDBException { + + try(final Options options = new Options() + .setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB txnDb = + TransactionDB.open(options, txnDbOptions, dbPath)) { + + try (final WriteOptions writeOptions = new WriteOptions(); + final ReadOptions readOptions = new ReadOptions()) { + + //////////////////////////////////////////////////////// + // + // Simple Transaction Example ("Read Committed") + // + //////////////////////////////////////////////////////// + readCommitted(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Repeatable Read" (Snapshot Isolation) Example + // -- Using a single Snapshot + // + //////////////////////////////////////////////////////// + repeatableRead(txnDb, writeOptions, readOptions); + + + //////////////////////////////////////////////////////// + // + // "Read Committed" (Monotonic Atomic Views) Example + // --Using multiple Snapshots + // + //////////////////////////////////////////////////////// + readCommitted_monotonicAtomicViews(txnDb, writeOptions, readOptions); + } + } + } + + /** + * Demonstrates "Read Committed" isolation + */ + private static void readCommitted(final TransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + final byte key1[] = "abc".getBytes(UTF_8); + final byte value1[] = "def".getBytes(UTF_8); + + final byte key2[] = "xyz".getBytes(UTF_8); + final byte value2[] = "zzz".getBytes(UTF_8); + + // Start a transaction + try(final Transaction txn = txnDb.beginTransaction(writeOptions)) { + // Read a key in this transaction + byte[] value = txn.get(readOptions, key1); + assert(value == null); + + // Write a key in this transaction + txn.put(key1, value1); + + // Read a key OUTSIDE this transaction. Does not affect txn. + value = txnDb.get(readOptions, key1); + assert(value == null); + + // Write a key OUTSIDE of this transaction. + // Does not affect txn since this is an unrelated key. + // If we wrote key 'abc' here, the transaction would fail to commit. + txnDb.put(writeOptions, key2, value2); + + // Commit transaction + txn.commit(); + } + } + + /** + * Demonstrates "Repeatable Read" (Snapshot Isolation) isolation + */ + private static void repeatableRead(final TransactionDB txnDb, + final WriteOptions writeOptions, final ReadOptions readOptions) + throws RocksDBException { + + final byte key1[] = "ghi".getBytes(UTF_8); + final byte value1[] = "jkl".getBytes(UTF_8); + + // Set a snapshot at start of transaction by setting setSnapshot(true) + try(final TransactionOptions txnOptions = new TransactionOptions() + .setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + final Snapshot snapshot = txn.getSnapshot(); + + // Write a key OUTSIDE of transaction + txnDb.put(writeOptions, key1, value1); + + // Attempt to read a key using the snapshot. This will fail since + // the previous write outside this txn conflicts with this read. 
+ readOptions.setSnapshot(snapshot); + + try { + final byte[] value = txn.getForUpdate(readOptions, key1, true); + throw new IllegalStateException(); + } catch(final RocksDBException e) { + assert(e.getStatus().getCode() == Status.Code.Busy); + } + + txn.rollback(); + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } + + /** + * Demonstrates "Read Committed" (Monotonic Atomic Views) isolation + * + * In this example, we set the snapshot multiple times. This is probably + * only necessary if you have very strict isolation requirements to + * implement. + */ + private static void readCommitted_monotonicAtomicViews( + final TransactionDB txnDb, final WriteOptions writeOptions, + final ReadOptions readOptions) throws RocksDBException { + + final byte keyX[] = "x".getBytes(UTF_8); + final byte valueX[] = "x".getBytes(UTF_8); + + final byte keyY[] = "y".getBytes(UTF_8); + final byte valueY[] = "y".getBytes(UTF_8); + + try (final TransactionOptions txnOptions = new TransactionOptions() + .setSetSnapshot(true); + final Transaction txn = + txnDb.beginTransaction(writeOptions, txnOptions)) { + + // Do some reads and writes to key "x" + Snapshot snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + byte[] value = txn.get(readOptions, keyX); + txn.put(valueX, valueX); + + // Do a write outside of the transaction to key "y" + txnDb.put(writeOptions, keyY, valueY); + + // Set a new snapshot in the transaction + txn.setSnapshot(); + txn.setSavePoint(); + snapshot = txnDb.getSnapshot(); + readOptions.setSnapshot(snapshot); + + // Do some reads and writes to key "y" + // Since the snapshot was advanced, the write done outside of the + // transaction does not conflict. + value = txn.getForUpdate(readOptions, keyY, true); + txn.put(keyY, valueY); + + // Decide we want to revert the last write from this transaction. + txn.rollbackToSavePoint(); + + // Commit. + txn.commit(); + } finally { + // Clear snapshot from read options since it is no longer valid + readOptions.setSnapshot(null); + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java new file mode 100644 index 000000000..2f0d4f3ca --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -0,0 +1,59 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * A CompactionFilter allows an application to modify/delete a key-value at + * the time of compaction. 
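+ *
+ * For example (an illustrative note, not part of the upstream javadoc),
+ * the bundled {@link org.rocksdb.RemoveEmptyValueCompactionFilter} wraps a
+ * C++ filter that drops entries whose value is empty:
+ * <pre>
+ *   options.setCompactionFilter(new RemoveEmptyValueCompactionFilter());
+ * </pre>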
+ *
+ * At present we just permit an overriding Java class to wrap a C++
+ * implementation
+ */
+public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>>
+    extends RocksObject {
+
+  public static class Context {
+    private final boolean fullCompaction;
+    private final boolean manualCompaction;
+
+    public Context(final boolean fullCompaction, final boolean manualCompaction) {
+      this.fullCompaction = fullCompaction;
+      this.manualCompaction = manualCompaction;
+    }
+
+    /**
+     * Does this compaction run include all data files
+     *
+     * @return true if this is a full compaction run
+     */
+    public boolean isFullCompaction() {
+      return fullCompaction;
+    }
+
+    /**
+     * Is this compaction requested by the client,
+     * or is it occurring as an automatic compaction process
+     *
+     * @return true if the compaction was initiated by the client
+     */
+    public boolean isManualCompaction() {
+      return manualCompaction;
+    }
+  }
+
+  protected AbstractCompactionFilter(final long nativeHandle) {
+    super(nativeHandle);
+  }
+
+  /**
+   * Deletes underlying C++ compaction pointer.
+   *
+   * Note that this function should be called only after all
+   * RocksDB instances referencing the compaction filter are closed.
+   * Otherwise an undefined behavior will occur.
+   */
+  @Override
+  protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
new file mode 100644
index 000000000..380b4461d
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilterFactory.java
@@ -0,0 +1,77 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Each compaction will create a new {@link AbstractCompactionFilter}
+ * allowing the application to know about different compactions
+ *
+ * @param <T> The concrete type of the compaction filter
+ */
+public abstract class AbstractCompactionFilterFactory<T extends AbstractCompactionFilter<?>>
+    extends RocksCallbackObject {
+
+  public AbstractCompactionFilterFactory() {
+    super(null);
+  }
+
+  @Override
+  protected long initializeNative(final long...
nativeParameterHandles) { + return createNewCompactionFilterFactory0(); + } + + /** + * Called from JNI, see compaction_filter_factory_jnicallback.cc + * + * @param fullCompaction {@link AbstractCompactionFilter.Context#fullCompaction} + * @param manualCompaction {@link AbstractCompactionFilter.Context#manualCompaction} + * + * @return native handle of the CompactionFilter + */ + private long createCompactionFilter(final boolean fullCompaction, + final boolean manualCompaction) { + final T filter = createCompactionFilter( + new AbstractCompactionFilter.Context(fullCompaction, manualCompaction)); + + // CompactionFilterFactory::CreateCompactionFilter returns a std::unique_ptr + // which therefore has ownership of the underlying native object + filter.disOwnNativeHandle(); + + return filter.nativeHandle_; + } + + /** + * Create a new compaction filter + * + * @param context The context describing the need for a new compaction filter + * + * @return A new instance of {@link AbstractCompactionFilter} + */ + public abstract T createCompactionFilter( + final AbstractCompactionFilter.Context context); + + /** + * A name which identifies this compaction filter + * + * The name will be printed to the LOG file on start up for diagnosis + * + * @return name which identifies this compaction filter. + */ + public abstract String name(); + + /** + * We override {@link RocksCallbackObject#disposeInternal()} + * as disposing of a rocksdb::AbstractCompactionFilterFactory requires + * a slightly different approach as it is a std::shared_ptr + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + private native long createNewCompactionFilterFactory0(); + private native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java new file mode 100644 index 000000000..c08e9127c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -0,0 +1,124 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + * Comparators are used by RocksDB to determine + * the ordering of keys. + * + * Implementations of Comparators in Java should extend this class. + */ +public abstract class AbstractComparator + extends RocksCallbackObject { + + AbstractComparator() { + super(); + } + + protected AbstractComparator(final ComparatorOptions copt) { + super(copt.nativeHandle_); + } + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewComparator(nativeParameterHandles[0]); + } + + /** + * Get the type of this comparator. + * + * Used for determining the correct C++ cast in native code. + * + * @return The type of the comparator. + */ + ComparatorType getComparatorType() { + return ComparatorType.JAVA_COMPARATOR; + } + + /** + * The name of the comparator. Used to check for comparator + * mismatches (i.e., a DB created with one comparator is + * accessed using a different comparator). + * + * A new name should be used whenever + * the comparator implementation changes in a way that will cause + * the relative ordering of any two keys to change. + * + * Names starting with "rocksdb." 
are reserved and should not be used.
+   *
+   * @return The name of this comparator implementation
+   */
+  public abstract String name();
+
+  /**
+   * Three-way key comparison. Implementations should provide a
+   * <a href="https://en.wikipedia.org/wiki/Total_order">total order</a>
+   * on keys that might be passed to it.
+   *
+   * The implementation may modify the {@code ByteBuffer}s passed in, though
+   * it would be unconventional to modify the "limit" or any of the
+   * underlying bytes. As a callback, RocksJava will ensure that {@code a}
+   * is a different instance from {@code b}.
+   *
+   * @param a buffer containing the first key in its "remaining" elements
+   * @param b buffer containing the second key in its "remaining" elements
+   *
+   * @return Should return either:
+   *     1) < 0 if "a" < "b"
+   *     2) == 0 if "a" == "b"
+   *     3) > 0 if "a" > "b"
+   */
+  public abstract int compare(final ByteBuffer a, final ByteBuffer b);
+
+  /**
+   * <p>Used to reduce the space requirements
+   * for internal data structures like index blocks.</p>
+   *
+   * <p>If start < limit, you may modify start which is a
+   * shorter string in [start, limit).</p>
+   *
+   * If you modify start, it is expected that you set the byte buffer so that
+   * a subsequent read of start.remaining() bytes from start.position()
+   * to start.limit() will obtain the new start value.
+   *
+   * <p>Simple comparator implementations may return with start unchanged.
+   * i.e., an implementation of this method that does nothing is correct.</p>
+   *
+   * @param start the start
+   * @param limit the limit
+   */
+  public void findShortestSeparator(final ByteBuffer start,
+      final ByteBuffer limit) {
+    // no-op
+  }
+
+  /**
+   * <p>Used to reduce the space requirements
+   * for internal data structures like index blocks.</p>
+   *
+   * <p>You may change key to a shorter key (key1) where
+   * key1 ≥ key.</p>
+   *
+   * <p>Simple comparator implementations may return the key unchanged.
+   * i.e., an implementation of
+   * this method that does nothing is correct.</p>
+ * + * @param key the key + */ + public void findShortSuccessor(final ByteBuffer key) { + // no-op + } + + public final boolean usingDirectBuffers() { + return usingDirectBuffers(nativeHandle_); + } + + private native boolean usingDirectBuffers(final long nativeHandle); + + private native long createNewComparator(final long comparatorOptionsHandle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java new file mode 100644 index 000000000..b732d2495 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractComparatorJniBridge.java @@ -0,0 +1,125 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + * This class is intentionally private, + * it holds methods which are called + * from C++ to interact with a Comparator + * written in Java. + * + * Placing these bridge methods in this + * class keeps the API of the + * {@link org.rocksdb.AbstractComparator} clean. + */ +class AbstractComparatorJniBridge { + + /** + * Only called from JNI. + * + * Simply a bridge to calling + * {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)}, + * which ensures that the byte buffer lengths are correct + * before and after the call. + * + * @param comparator the comparator object on which to + * call {@link AbstractComparator#compare(ByteBuffer, ByteBuffer)} + * @param a buffer access to first key + * @param aLen the length of the a key, + * may be smaller than the buffer {@code a} + * @param b buffer access to second key + * @param bLen the length of the b key, + * may be smaller than the buffer {@code b} + * + * @return the result of the comparison + */ + private static int compareInternal( + final AbstractComparator comparator, + final ByteBuffer a, final int aLen, + final ByteBuffer b, final int bLen) { + if (aLen != -1) { + a.mark(); + a.limit(aLen); + } + if (bLen != -1) { + b.mark(); + b.limit(bLen); + } + + final int c = comparator.compare(a, b); + + if (aLen != -1) { + a.reset(); + } + if (bLen != -1) { + b.reset(); + } + + return c; + } + + /** + * Only called from JNI. + * + * Simply a bridge to calling + * {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}, + * which ensures that the byte buffer lengths are correct + * before the call. 
+   *
+   * @param comparator the comparator object on which to
+   *     call {@link AbstractComparator#findShortestSeparator(ByteBuffer, ByteBuffer)}
+   * @param start buffer access to the start key
+   * @param startLen the length of the start key,
+   *     may be smaller than the buffer {@code start}
+   * @param limit buffer access to the limit key
+   * @param limitLen the length of the limit key,
+   *     may be smaller than the buffer {@code limit}
+   *
+   * @return either {@code startLen} if the start key is unchanged, otherwise
+   *     the new length of the start key
+   */
+  private static int findShortestSeparatorInternal(
+      final AbstractComparator comparator,
+      final ByteBuffer start, final int startLen,
+      final ByteBuffer limit, final int limitLen) {
+    if (startLen != -1) {
+      start.limit(startLen);
+    }
+    if (limitLen != -1) {
+      limit.limit(limitLen);
+    }
+    comparator.findShortestSeparator(start, limit);
+    return start.remaining();
+  }
+
+  /**
+   * Only called from JNI.
+   *
+   * Simply a bridge to calling
+   * {@link AbstractComparator#findShortSuccessor(ByteBuffer)},
+   * which ensures that the byte buffer length is correct
+   * before the call.
+   *
+   * @param comparator the comparator object on which to
+   *     call {@link AbstractComparator#findShortSuccessor(ByteBuffer)}
+   * @param key buffer access to the key
+   * @param keyLen the length of the key,
+   *     may be smaller than the buffer {@code key}
+   *
+   * @return either keyLen if the key is unchanged, otherwise the new length of the key
+   */
+  private static int findShortSuccessorInternal(
+      final AbstractComparator comparator,
+      final ByteBuffer key, final int keyLen) {
+    if (keyLen != -1) {
+      key.limit(keyLen);
+    }
+    comparator.findShortSuccessor(key);
+    return key.remaining();
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractEventListener.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractEventListener.java
new file mode 100644
index 000000000..6698acf88
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractEventListener.java
@@ -0,0 +1,334 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import static org.rocksdb.AbstractEventListener.EnabledEventCallback.*;
+
+/**
+ * Base class for Event Listeners.
+ */
+public abstract class AbstractEventListener extends RocksCallbackObject implements EventListener {
+  public enum EnabledEventCallback {
+    ON_FLUSH_COMPLETED((byte) 0x0),
+    ON_FLUSH_BEGIN((byte) 0x1),
+    ON_TABLE_FILE_DELETED((byte) 0x2),
+    ON_COMPACTION_BEGIN((byte) 0x3),
+    ON_COMPACTION_COMPLETED((byte) 0x4),
+    ON_TABLE_FILE_CREATED((byte) 0x5),
+    ON_TABLE_FILE_CREATION_STARTED((byte) 0x6),
+    ON_MEMTABLE_SEALED((byte) 0x7),
+    ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED((byte) 0x8),
+    ON_EXTERNAL_FILE_INGESTED((byte) 0x9),
+    ON_BACKGROUND_ERROR((byte) 0xA),
+    ON_STALL_CONDITIONS_CHANGED((byte) 0xB),
+    ON_FILE_READ_FINISH((byte) 0xC),
+    ON_FILE_WRITE_FINISH((byte) 0xD),
+    ON_FILE_FLUSH_FINISH((byte) 0xE),
+    ON_FILE_SYNC_FINISH((byte) 0xF),
+    ON_FILE_RANGE_SYNC_FINISH((byte) 0x10),
+    ON_FILE_TRUNCATE_FINISH((byte) 0x11),
+    ON_FILE_CLOSE_FINISH((byte) 0x12),
+    SHOULD_BE_NOTIFIED_ON_FILE_IO((byte) 0x13),
+    ON_ERROR_RECOVERY_BEGIN((byte) 0x14),
+    ON_ERROR_RECOVERY_COMPLETED((byte) 0x15);
+
+    private final byte value;
+
+    EnabledEventCallback(final byte value) {
+      this.value = value;
+    }
+
+    /**
+     * Get the internal representation value.
+     *
+     * @return the internal representation value
+     */
+    byte getValue() {
+      return value;
+    }
+
+    /**
+     * Get the EnabledEventCallbacks from the internal representation value.
+     *
+     * @return the enabled event callback.
+     *
+     * @throws IllegalArgumentException if the value is unknown.
+     */
+    static EnabledEventCallback fromValue(final byte value) {
+      for (final EnabledEventCallback enabledEventCallback : EnabledEventCallback.values()) {
+        if (enabledEventCallback.value == value) {
+          return enabledEventCallback;
+        }
+      }
+
+      throw new IllegalArgumentException(
+          "Illegal value provided for EnabledEventCallback: " + value);
+    }
+  }
+
+  /**
+   * Creates an Event Listener that will
+   * receive all callbacks from C++.
+   *
+   * If you don't need all callbacks, it is much more efficient to
+   * just register for the ones you need by calling
+   * {@link #AbstractEventListener(EnabledEventCallback...)} instead.
+   */
+  protected AbstractEventListener() {
+    this(ON_FLUSH_COMPLETED, ON_FLUSH_BEGIN, ON_TABLE_FILE_DELETED, ON_COMPACTION_BEGIN,
+        ON_COMPACTION_COMPLETED, ON_TABLE_FILE_CREATED, ON_TABLE_FILE_CREATION_STARTED,
+        ON_MEMTABLE_SEALED, ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED, ON_EXTERNAL_FILE_INGESTED,
+        ON_BACKGROUND_ERROR, ON_STALL_CONDITIONS_CHANGED, ON_FILE_READ_FINISH, ON_FILE_WRITE_FINISH,
+        ON_FILE_FLUSH_FINISH, ON_FILE_SYNC_FINISH, ON_FILE_RANGE_SYNC_FINISH,
+        ON_FILE_TRUNCATE_FINISH, ON_FILE_CLOSE_FINISH, SHOULD_BE_NOTIFIED_ON_FILE_IO,
+        ON_ERROR_RECOVERY_BEGIN, ON_ERROR_RECOVERY_COMPLETED);
+  }
+
+  /**
+   * Creates an Event Listener that will
+   * receive only certain callbacks from C++.
+   *
+   * @param enabledEventCallbacks callbacks to enable in Java.
+   */
+  protected AbstractEventListener(final EnabledEventCallback... enabledEventCallbacks) {
+    super(packToLong(enabledEventCallbacks));
+  }
+
+  /**
+   * Pack EnabledEventCallbacks to a long.
+   *
+   * @param enabledEventCallbacks the flags
+   *
+   * @return a long
+   */
+  private static long packToLong(final EnabledEventCallback... enabledEventCallbacks) {
+    long l = 0;
+    for (int i = 0; i < enabledEventCallbacks.length; i++) {
+      l |= 1 << enabledEventCallbacks[i].getValue();
+    }
+    return l;
+  }
+
+  @Override
+  public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) {
+    // no-op
+  }
+
+  /**
+   * Called from JNI, proxy for
+   * {@link #onFlushCompleted(RocksDB, FlushJobInfo)}.
+   *
+   * @param dbHandle native handle of the database
+   * @param flushJobInfo the flush job info
+   */
+  private void onFlushCompletedProxy(final long dbHandle, final FlushJobInfo flushJobInfo) {
+    final RocksDB db = new RocksDB(dbHandle);
+    db.disOwnNativeHandle(); // we don't own this!
+    onFlushCompleted(db, flushJobInfo);
+  }
+
+  @Override
+  public void onFlushBegin(final RocksDB db, final FlushJobInfo flushJobInfo) {
+    // no-op
+  }
+
+  /**
+   * Called from JNI, proxy for
+   * {@link #onFlushBegin(RocksDB, FlushJobInfo)}.
+   *
+   * @param dbHandle native handle of the database
+   * @param flushJobInfo the flush job info
+   */
+  private void onFlushBeginProxy(final long dbHandle, final FlushJobInfo flushJobInfo) {
+    final RocksDB db = new RocksDB(dbHandle);
+    db.disOwnNativeHandle(); // we don't own this!
+    onFlushBegin(db, flushJobInfo);
+  }
+
+  @Override
+  public void onTableFileDeleted(final TableFileDeletionInfo tableFileDeletionInfo) {
+    // no-op
+  }
+
+  @Override
+  public void onCompactionBegin(final RocksDB db, final CompactionJobInfo compactionJobInfo) {
+    // no-op
+  }
+
+  /**
+   * Called from JNI, proxy for
+   * {@link #onCompactionBegin(RocksDB, CompactionJobInfo)}.
+   *
+   * @param dbHandle native handle of the database
+   * @param compactionJobInfo the compaction job info
+   */
+  private void onCompactionBeginProxy(
+      final long dbHandle, final CompactionJobInfo compactionJobInfo) {
+    final RocksDB db = new RocksDB(dbHandle);
+    db.disOwnNativeHandle(); // we don't own this!
+    onCompactionBegin(db, compactionJobInfo);
+  }
+
+  @Override
+  public void onCompactionCompleted(final RocksDB db, final CompactionJobInfo compactionJobInfo) {
+    // no-op
+  }
+
+  /**
+   * Called from JNI, proxy for
+   * {@link #onCompactionCompleted(RocksDB, CompactionJobInfo)}.
+   *
+   * @param dbHandle native handle of the database
+   * @param compactionJobInfo the compaction job info
+   */
+  private void onCompactionCompletedProxy(
+      final long dbHandle, final CompactionJobInfo compactionJobInfo) {
+    final RocksDB db = new RocksDB(dbHandle);
+    db.disOwnNativeHandle(); // we don't own this!
+    onCompactionCompleted(db, compactionJobInfo);
+  }
+
+  @Override
+  public void onTableFileCreated(final TableFileCreationInfo tableFileCreationInfo) {
+    // no-op
+  }
+
+  @Override
+  public void onTableFileCreationStarted(
+      final TableFileCreationBriefInfo tableFileCreationBriefInfo) {
+    // no-op
+  }
+
+  @Override
+  public void onMemTableSealed(final MemTableInfo memTableInfo) {
+    // no-op
+  }
+
+  @Override
+  public void onColumnFamilyHandleDeletionStarted(final ColumnFamilyHandle columnFamilyHandle) {
+    // no-op
+  }
+
+  @Override
+  public void onExternalFileIngested(
+      final RocksDB db, final ExternalFileIngestionInfo externalFileIngestionInfo) {
+    // no-op
+  }
+
+  /**
+   * Called from JNI, proxy for
+   * {@link #onExternalFileIngested(RocksDB, ExternalFileIngestionInfo)}.
+   *
+   * @param dbHandle native handle of the database
+   * @param externalFileIngestionInfo the external file ingestion info
+   */
+  private void onExternalFileIngestedProxy(
+      final long dbHandle, final ExternalFileIngestionInfo externalFileIngestionInfo) {
+    final RocksDB db = new RocksDB(dbHandle);
+    db.disOwnNativeHandle(); // we don't own this!
+    onExternalFileIngested(db, externalFileIngestionInfo);
+  }
+
+  @Override
+  public void onBackgroundError(
+      final BackgroundErrorReason backgroundErrorReason, final Status backgroundError) {
+    // no-op
+  }
+
+  /**
+   * Called from JNI, proxy for
+   * {@link #onBackgroundError(BackgroundErrorReason, Status)}.
+ * + * @param reasonByte byte value representing error reason + * @param backgroundError status with error code + */ + private void onBackgroundErrorProxy(final byte reasonByte, final Status backgroundError) { + onBackgroundError(BackgroundErrorReason.fromValue(reasonByte), backgroundError); + } + + @Override + public void onStallConditionsChanged(final WriteStallInfo writeStallInfo) { + // no-op + } + + @Override + public void onFileReadFinish(final FileOperationInfo fileOperationInfo) { + // no-op + } + + @Override + public void onFileWriteFinish(final FileOperationInfo fileOperationInfo) { + // no-op + } + + @Override + public void onFileFlushFinish(final FileOperationInfo fileOperationInfo) { + // no-op + } + + @Override + public void onFileSyncFinish(final FileOperationInfo fileOperationInfo) { + // no-op + } + + @Override + public void onFileRangeSyncFinish(final FileOperationInfo fileOperationInfo) { + // no-op + } + + @Override + public void onFileTruncateFinish(final FileOperationInfo fileOperationInfo) { + // no-op + } + + @Override + public void onFileCloseFinish(final FileOperationInfo fileOperationInfo) { + // no-op + } + + @Override + public boolean shouldBeNotifiedOnFileIO() { + return false; + } + + @Override + public boolean onErrorRecoveryBegin( + final BackgroundErrorReason backgroundErrorReason, final Status backgroundError) { + return true; + } + + /** + * Called from JNI, proxy for + * {@link #onErrorRecoveryBegin(BackgroundErrorReason, Status)}. + * + * @param reasonByte byte value representing error reason + * @param backgroundError status with error code + */ + private boolean onErrorRecoveryBeginProxy(final byte reasonByte, final Status backgroundError) { + return onErrorRecoveryBegin(BackgroundErrorReason.fromValue(reasonByte), backgroundError); + } + + @Override + public void onErrorRecoveryCompleted(final Status oldBackgroundError) { + // no-op + } + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewEventListener(nativeParameterHandles[0]); + } + + /** + * Deletes underlying C++ native callback object pointer + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + private native long createNewEventListener(final long enabledEventCallbackValues); + private native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java new file mode 100644 index 000000000..173d63e90 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java @@ -0,0 +1,65 @@ +// Copyright (c) 2016, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+
+package org.rocksdb;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Offers functionality for implementations of
+ * {@link AbstractNativeReference} which have an immutable reference to the
+ * underlying native C++ object
+ */
+//@ThreadSafe
+public abstract class AbstractImmutableNativeReference
+    extends AbstractNativeReference {
+
+  /**
+   * A flag indicating whether the current {@code AbstractNativeReference} is
+   * responsible to free the underlying C++ object
+   */
+  protected final AtomicBoolean owningHandle_;
+
+  protected AbstractImmutableNativeReference(final boolean owningHandle) {
+    this.owningHandle_ = new AtomicBoolean(owningHandle);
+  }
+
+  @Override
+  public boolean isOwningHandle() {
+    return owningHandle_.get();
+  }
+
+  /**
+   * Releases this {@code AbstractNativeReference} from the responsibility of
+   * freeing the underlying native C++ object
+   * <p>
+   * This will prevent the object from attempting to delete the underlying
+   * native object in {@code close()}. This must be used when another object
+   * takes over ownership of the native object or both will attempt to delete
+   * the underlying object when closed.
+   * <p>
+   * When {@code disOwnNativeHandle()} is called, {@code close()} will
+   * subsequently take no action. As a result, incorrect use of this function
+   * may cause a memory leak.
+   * </p>
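+   * <p>
+   * For example (see {@code AbstractCompactionFilterFactory} above): a
+   * compaction filter handed over to the native side is disowned on the
+   * Java side, because C++ takes ownership of it through a
+   * {@code std::unique_ptr}.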
+   */
+  protected final void disOwnNativeHandle() {
+    owningHandle_.set(false);
+  }
+
+  @Override
+  public void close() {
+    if (owningHandle_.compareAndSet(true, false)) {
+      disposeInternal();
+    }
+  }
+
+  /**
+   * The helper function of {@link AbstractImmutableNativeReference#close()}
+   * which all subclasses of {@code AbstractImmutableNativeReference} must
+   * implement to release their underlying native C++ objects.
+   */
+  protected abstract void disposeInternal();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
new file mode 100644
index 000000000..7189272b8
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractMutableOptions.java
@@ -0,0 +1,370 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+import java.util.*;
+
+public abstract class AbstractMutableOptions {
+
+  protected static final String KEY_VALUE_PAIR_SEPARATOR = ";";
+  protected static final char KEY_VALUE_SEPARATOR = '=';
+  static final String INT_ARRAY_INT_SEPARATOR = ":";
+
+  protected final String[] keys;
+  private final String[] values;
+
+  /**
+   * User must use builder pattern, or parser.
+   *
+   * @param keys the keys
+   * @param values the values
+   */
+  protected AbstractMutableOptions(final String[] keys, final String[] values) {
+    this.keys = keys;
+    this.values = values;
+  }
+
+  String[] getKeys() {
+    return keys;
+  }
+
+  String[] getValues() {
+    return values;
+  }
+
+  /**
+   * Returns a string representation of MutableOptions which
+   * is suitable for consumption by {@code #parse(String)}.
+   *
+   * @return String representation of MutableOptions
+   */
+  @Override
+  public String toString() {
+    final StringBuilder buffer = new StringBuilder();
+    for(int i = 0; i < keys.length; i++) {
+      buffer
+          .append(keys[i])
+          .append(KEY_VALUE_SEPARATOR)
+          .append(values[i]);
+
+      if(i + 1 < keys.length) {
+        buffer.append(KEY_VALUE_PAIR_SEPARATOR);
+      }
+    }
+    return buffer.toString();
+  }
+
+  public static abstract class AbstractMutableOptionsBuilder<
+      T extends AbstractMutableOptions,
+      U extends AbstractMutableOptionsBuilder<T, U, K>,
+      K extends MutableOptionKey> {
+
+    private final Map<K, MutableOptionValue<?>> options = new LinkedHashMap<>();
+    private final List<OptionString.Entry> unknown = new ArrayList<>();
+
+    protected abstract U self();
+
+    /**
+     * Get all of the possible keys
+     *
+     * @return A map of all keys, indexed by name.
+     */
+    protected abstract Map<String, K> allKeys();
+
+    /**
+     * Construct a sub-class instance of {@link AbstractMutableOptions}.
+     *
+     * @param keys the keys
+     * @param values the values
+     *
+     * @return an instance of the options.
+     */
+    protected abstract T build(final String[] keys, final String[] values);
+
+    public T build() {
+      final String[] keys = new String[options.size()];
+      final String[] values = new String[options.size()];
+
+      int i = 0;
+      for (final Map.Entry<K, MutableOptionValue<?>> option : options.entrySet()) {
+        keys[i] = option.getKey().name();
+        values[i] = option.getValue().asString();
+        i++;
+      }
+
+      return build(keys, values);
+    }
+
+    protected U setDouble(
+        final K key, final double value) {
+      if (key.getValueType() != MutableOptionKey.ValueType.DOUBLE) {
+        throw new IllegalArgumentException(
+            key + " does not accept a double value");
+      }
+      options.put(key, MutableOptionValue.fromDouble(value));
+      return self();
+    }
+
+    protected double getDouble(final K key)
+        throws NoSuchElementException, NumberFormatException {
+      final MutableOptionValue<?> value = options.get(key);
+      if(value == null) {
+        throw new NoSuchElementException(key.name() + " has not been set");
+      }
+      return value.asDouble();
+    }
+
+    protected U setLong(
+        final K key, final long value) {
+      if(key.getValueType() != MutableOptionKey.ValueType.LONG) {
+        throw new IllegalArgumentException(
+            key + " does not accept a long value");
+      }
+      options.put(key, MutableOptionValue.fromLong(value));
+      return self();
+    }
+
+    protected long getLong(final K key)
+        throws NoSuchElementException, NumberFormatException {
+      final MutableOptionValue<?> value = options.get(key);
+      if(value == null) {
+        throw new NoSuchElementException(key.name() + " has not been set");
+      }
+      return value.asLong();
+    }
+
+    protected U setInt(
+        final K key, final int value) {
+      if(key.getValueType() != MutableOptionKey.ValueType.INT) {
+        throw new IllegalArgumentException(
+            key + " does not accept an integer value");
+      }
+      options.put(key, MutableOptionValue.fromInt(value));
+      return self();
+    }
+
+    protected int getInt(final K key)
+        throws NoSuchElementException, NumberFormatException {
+      final MutableOptionValue<?> value = options.get(key);
+      if(value == null) {
+        throw new NoSuchElementException(key.name() + " has not been set");
+      }
+      return value.asInt();
+    }
+
+    protected U setBoolean(
+        final K key, final boolean value) {
+      if(key.getValueType() != MutableOptionKey.ValueType.BOOLEAN) {
+        throw new IllegalArgumentException(
+            key + " does not accept a boolean value");
+      }
+      options.put(key, MutableOptionValue.fromBoolean(value));
+      return self();
+    }
+
+    protected boolean getBoolean(final K key)
+        throws NoSuchElementException, NumberFormatException {
+      final MutableOptionValue<?> value = options.get(key);
+      if(value == null) {
+        throw new NoSuchElementException(key.name() + " has not been set");
+      }
+      return value.asBoolean();
+    }
+
+    protected U setIntArray(
+        final K key, final int[] value) {
+      if(key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) {
+        throw new IllegalArgumentException(
+            key + " does not accept an int array value");
+      }
+      options.put(key, MutableOptionValue.fromIntArray(value));
+      return self();
+    }
+
+    protected int[] getIntArray(final K key)
+        throws NoSuchElementException, NumberFormatException {
+      final MutableOptionValue<?> value = options.get(key);
+      if(value == null) {
+        throw new NoSuchElementException(key.name() + " has not been set");
+      }
+      return value.asIntArray();
+    }
+
+    protected <N extends Enum<N>> U setEnum(
+        final K key, final N value) {
+      if(key.getValueType() != MutableOptionKey.ValueType.ENUM) {
+        throw new IllegalArgumentException(
+            key + " does not accept an Enum value");
+      }
+      options.put(key, MutableOptionValue.fromEnum(value));
+      return self();
+    }
+
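+    // Illustrative usage (an editorial sketch) through a concrete subclass
+    // of this builder, e.g. MutableColumnFamilyOptions:
+    //   db.setOptions(columnFamilyHandle,
+    //       MutableColumnFamilyOptions.builder()
+    //           .setWriteBufferSize(8 * 1024 * 1024)
+    //           .build());
+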
+    @SuppressWarnings("unchecked")
+    protected <N extends Enum<N>> N getEnum(final K key)
+        throws NoSuchElementException, NumberFormatException {
+      final MutableOptionValue<?> value = options.get(key);
+      if (value == null) {
+        throw new NoSuchElementException(key.name() + " has not been set");
+      }
+
+      if (!(value instanceof MutableOptionValue.MutableOptionEnumValue)) {
+        throw new NoSuchElementException(key.name() + " is not of Enum type");
+      }
+
+      return ((MutableOptionValue.MutableOptionEnumValue<N>) value).asObject();
+    }
+
+    /**
+     * Parse a string into a long value, accepting values expressed as a double (such as 9.00) which
+     * are meant to be a long, not a double
+     *
+     * @param value the string containing a value which represents a long
+     * @return the long value of the parsed string
+     */
+    private long parseAsLong(final String value) {
+      try {
+        return Long.parseLong(value);
+      } catch (NumberFormatException nfe) {
+        final double doubleValue = Double.parseDouble(value);
+        if (doubleValue != Math.round(doubleValue))
+          throw new IllegalArgumentException("Unable to parse or round " + value + " to long");
+        return Math.round(doubleValue);
+      }
+    }
+
+    /**
+     * Parse a string into an int value, accepting values expressed as a double (such as 9.00) which
+     * are meant to be an int, not a double
+     *
+     * @param value the string containing a value which represents an int
+     * @return the int value of the parsed string
+     */
+    private int parseAsInt(final String value) {
+      try {
+        return Integer.parseInt(value);
+      } catch (NumberFormatException nfe) {
+        final double doubleValue = Double.parseDouble(value);
+        if (doubleValue != Math.round(doubleValue))
+          throw new IllegalArgumentException("Unable to parse or round " + value + " to int");
+        return (int) Math.round(doubleValue);
+      }
+    }
+
+    /**
+     * Constructs a builder for mutable column family options from a hierarchical parsed options
+     * string representation. The {@link OptionString.Parser} class output has been used to create a
+     * (name,value)-list; each value may be either a simple string or a (name, value)-list in turn.
+     *
+     * @param options a list of parsed option string objects
+     * @param ignoreUnknown what to do if the key is not one of the keys we expect
+     *
+     * @return a builder with the values from the parsed input set
+     *
+     * @throws IllegalArgumentException if an option value is of the wrong type, or a key is empty
+     */
+    protected U fromParsed(final List<OptionString.Entry> options, final boolean ignoreUnknown) {
+      Objects.requireNonNull(options);
+
+      for (final OptionString.Entry option : options) {
+        try {
+          if (option.key.isEmpty()) {
+            throw new IllegalArgumentException("options string is invalid: " + option);
+          }
+          fromOptionString(option, ignoreUnknown);
+        } catch (NumberFormatException nfe) {
+          throw new IllegalArgumentException(
+              "" + option.key + "=" + option.value + " - not a valid value for its type", nfe);
+        }
+      }
+
+      return self();
+    }
+
+    /**
+     * Set a value in the builder from the supplied option string
+     *
+     * @param option the option key/value to add to this builder
+     * @param ignoreUnknown if this is not set, throw an exception when a key is not in the known
+     *     set
+     * @return the same object, after adding options
+     * @throws IllegalArgumentException if the key is unknown, or a value has the wrong type/form
+     */
+    private U fromOptionString(final OptionString.Entry option, final boolean ignoreUnknown)
+        throws IllegalArgumentException {
+      Objects.requireNonNull(option.key);
+      Objects.requireNonNull(option.value);
+
+      final K key = allKeys().get(option.key);
+      if (key == null && ignoreUnknown) {
+        unknown.add(option);
+        return self();
+      } else if (key == null) {
+        throw new IllegalArgumentException("Key: " + option.key + " is not a known option key");
+      }
+
+      if (!option.value.isList()) {
+        throw new IllegalArgumentException(
+            "Option: " + key + " is not a simple value or list, don't know how to parse it");
+      }
+
+      // Check that simple values are the single item in the array
+      if (key.getValueType() != MutableOptionKey.ValueType.INT_ARRAY) {
+        if (option.value.list.size() != 1) {
+          throw new IllegalArgumentException(
+              "Simple value does not have exactly 1 item: " + option.value.list);
+        }
+      }
+
+      final List<String> valueStrs = option.value.list;
+      final String valueStr = valueStrs.get(0);
+
+      switch (key.getValueType()) {
+        case DOUBLE:
+          return setDouble(key, Double.parseDouble(valueStr));
+
+        case LONG:
+          return setLong(key, parseAsLong(valueStr));
+
+        case INT:
+          return setInt(key, parseAsInt(valueStr));
+
+        case BOOLEAN:
+          return setBoolean(key, Boolean.parseBoolean(valueStr));
+
+        case INT_ARRAY:
+          final int[] value = new int[valueStrs.size()];
+          for (int i = 0; i < valueStrs.size(); i++) {
+            value[i] = Integer.parseInt(valueStrs.get(i));
+          }
+          return setIntArray(key, value);
+
+        case ENUM:
+          String optionName = key.name();
+          if (optionName.equals("prepopulate_blob_cache")) {
+            final PrepopulateBlobCache prepopulateBlobCache =
+                PrepopulateBlobCache.getFromInternal(valueStr);
+            return setEnum(key, prepopulateBlobCache);
+          } else if (optionName.equals("compression")
+              || optionName.equals("blob_compression_type")) {
+            final CompressionType compressionType = CompressionType.getFromInternal(valueStr);
+            return setEnum(key, compressionType);
+          } else {
+            throw new IllegalArgumentException("Unknown enum type: " + key.name());
+          }
+
+        default:
+          throw new IllegalStateException(key + " has unknown value type: " + key.getValueType());
+      }
+    }
+
+    /**
+     *
+     * @return the list of keys encountered which were not known to the type being generated
+     */
+    public List<OptionString.Entry> getUnknown() {
+      return new
ArrayList<>(unknown); + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java new file mode 100644 index 000000000..88b2963b6 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java @@ -0,0 +1,48 @@ +// Copyright (c) 2016, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * AbstractNativeReference is the base-class of all RocksDB classes that have + * a pointer to a native C++ {@code rocksdb} object. + *

<p>
+ * AbstractNativeReference has the {@link AbstractNativeReference#close()}
+ * method, which frees its associated C++ object.</p>
+ * <p>
+ * This function should be called manually, or even better, called implicitly using a
+ * <a href="https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html">try-with-resources</a>
+ * statement, when you are finished with the object. It is no longer
+ * called automatically during the regular Java GC process via
+ * {@link AbstractNativeReference#finalize()}.</p>
+ * <p>
+ * Explanatory note - When or if the Garbage Collector calls {@link Object#finalize()}
+ * depends on the JVM implementation and system conditions, which the programmer
+ * cannot control. In addition, the GC cannot see through the native reference
+ * long member variable (which is the C++ pointer value to the native object),
+ * and cannot know what other resources depend on it.
+ * </p>
+ */ +public abstract class AbstractNativeReference implements AutoCloseable { + /** + * Returns true if we are responsible for freeing the underlying C++ object + * + * @return true if we are responsible to free the C++ object + */ + protected abstract boolean isOwningHandle(); + + /** + * Frees the underlying C++ object + *

<p>
+   * It is strongly recommended that the developer calls this after they
+   * have finished using the object.</p>
+   * <p>
+   * Note, that once an instance of {@link AbstractNativeReference} has been
+   * closed, calling any of its functions will lead to undefined
+   * behavior.</p>
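+   * <p>
+   * For example (illustrative):
+   * <pre>
+   *   try (final Options options = new Options()) {
+   *     options.setCreateIfMissing(true);
+   *     // close() runs automatically here, freeing the native object
+   *   }
+   * </pre>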
+ */ + @Override public abstract void close(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java new file mode 100644 index 000000000..1aade1b89 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -0,0 +1,146 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + * Base class implementation for Rocks Iterators + * in the Java API + * + *

<p>Multiple threads can invoke const methods on an RocksIterator without
+ * external synchronization, but if any of the threads may call a
+ * non-const method, all threads accessing the same RocksIterator must use
+ * external synchronization.</p>
+ *
+ * @param <P> The type of the Parent Object from which the Rocks Iterator was
+ *     created. This is used by disposeInternal to avoid double-free
+ *     issues with the underlying C++ object.
+ * @see org.rocksdb.RocksObject
+ */
+public abstract class AbstractRocksIterator<P extends RocksObject>
+    extends RocksObject implements RocksIteratorInterface {
+  final P parent_;
+
+  protected AbstractRocksIterator(final P parent,
+      final long nativeHandle) {
+    super(nativeHandle);
+    // parent must point to a valid RocksDB instance.
+    assert (parent != null);
+    // RocksIterator must hold a reference to the related parent instance
+    // to guarantee that while a GC cycle starts RocksIterator instances
+    // are freed prior to parent instances.
+    parent_ = parent;
+  }
+
+  @Override
+  public boolean isValid() {
+    assert (isOwningHandle());
+    return isValid0(nativeHandle_);
+  }
+
+  @Override
+  public void seekToFirst() {
+    assert (isOwningHandle());
+    seekToFirst0(nativeHandle_);
+  }
+
+  @Override
+  public void seekToLast() {
+    assert (isOwningHandle());
+    seekToLast0(nativeHandle_);
+  }
+
+  @Override
+  public void seek(final byte[] target) {
+    assert (isOwningHandle());
+    seek0(nativeHandle_, target, target.length);
+  }
+
+  @Override
+  public void seekForPrev(final byte[] target) {
+    assert (isOwningHandle());
+    seekForPrev0(nativeHandle_, target, target.length);
+  }
+
+  @Override
+  public void seek(final ByteBuffer target) {
+    assert (isOwningHandle());
+    if (target.isDirect()) {
+      seekDirect0(nativeHandle_, target, target.position(), target.remaining());
+    } else {
+      seekByteArray0(nativeHandle_, target.array(), target.arrayOffset() + target.position(),
+          target.remaining());
+    }
+    target.position(target.limit());
+  }
+
+  @Override
+  public void seekForPrev(final ByteBuffer target) {
+    assert (isOwningHandle());
+    if (target.isDirect()) {
+      seekForPrevDirect0(nativeHandle_, target, target.position(), target.remaining());
+    } else {
+      seekForPrevByteArray0(nativeHandle_, target.array(), target.arrayOffset() + target.position(),
+          target.remaining());
+    }
+    target.position(target.limit());
+  }
+
+  @Override
+  public void next() {
+    assert (isOwningHandle());
+    next0(nativeHandle_);
+  }
+
+  @Override
+  public void prev() {
+    assert (isOwningHandle());
+    prev0(nativeHandle_);
+  }
+
+  @Override
+  public void refresh() throws RocksDBException {
+    assert (isOwningHandle());
+    refresh0(nativeHandle_);
+  }
+
+  @Override
+  public void status() throws RocksDBException {
+    assert (isOwningHandle());
+    status0(nativeHandle_);
+  }
+
+  /**
+   *

<p>Deletes underlying C++ iterator pointer.</p>
+   *
+   * <p>Note: the underlying handle can only be safely deleted if the parent
+   * instance related to a certain RocksIterator is still valid and initialized.
+   * Therefore {@code disposeInternal()} checks if the parent is initialized
+   * before freeing the native handle.</p>
+   */
+  @Override
+  protected void disposeInternal() {
+    if (parent_.isOwningHandle()) {
+      disposeInternal(nativeHandle_);
+    }
+  }
+
+  abstract boolean isValid0(long handle);
+  abstract void seekToFirst0(long handle);
+  abstract void seekToLast0(long handle);
+  abstract void next0(long handle);
+  abstract void prev0(long handle);
+  abstract void refresh0(long handle) throws RocksDBException;
+  abstract void seek0(long handle, byte[] target, int targetLen);
+  abstract void seekForPrev0(long handle, byte[] target, int targetLen);
+  abstract void seekDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen);
+  abstract void seekForPrevDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen);
+  abstract void seekByteArray0(long handle, byte[] target, int targetOffset, int targetLen);
+  abstract void seekForPrevByteArray0(long handle, byte[] target, int targetOffset, int targetLen);
+
+  abstract void status0(long handle) throws RocksDBException;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java
new file mode 100644
index 000000000..5a22e2956
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java
@@ -0,0 +1,191 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Slices are used by RocksDB to provide
+ * efficient access to keys and values.
+ *
+ * This class is package private, implementers
+ * should extend either of the public abstract classes:
+ * @see org.rocksdb.Slice
+ * @see org.rocksdb.DirectSlice
+ *
+ * Regards the lifecycle of Java Slices in RocksDB:
+ * At present when you configure a Comparator from Java, it creates an
+ * instance of a C++ BaseComparatorJniCallback subclass and
+ * passes that to RocksDB as the comparator. That subclass of
+ * BaseComparatorJniCallback creates the Java
+ * @see org.rocksdb.AbstractSlice subclass Objects. When you dispose
+ * the Java @see org.rocksdb.AbstractComparator subclass, it disposes the
+ * C++ BaseComparatorJniCallback subclass, which in turn destroys the
+ * Java @see org.rocksdb.AbstractSlice subclass Objects.
+ */
+public abstract class AbstractSlice<T> extends RocksMutableObject {
+
+  protected AbstractSlice() {
+    super();
+  }
+
+  protected AbstractSlice(final long nativeHandle) {
+    super(nativeHandle);
+  }
+
+  /**
+   * Returns the data of the slice.
+   *
+   * @return The slice data. Note, the type of access is
+   *     determined by the subclass
+   * @see org.rocksdb.AbstractSlice#data0(long)
+   */
+  public T data() {
+    return data0(getNativeHandle());
+  }
+
+  /**
+   * Access to the data is provided by the
+   * subtype as it needs to handle the
+   * generic typing.
+   *
+   * @param handle The address of the underlying
+   *     native object.
+   *
+   * @return Java typed access to the data.
+   */
+  protected abstract T data0(long handle);
+
+  /**
+   * Drops the specified {@code n}
+   * number of bytes from the start
+   * of the backing slice
+   *
+   * @param n The number of bytes to drop
+   */
+  public abstract void removePrefix(final int n);
+
+  /**
+   * Clears the backing slice
+   */
+  public abstract void clear();
+
+  /**
+   * Return the length (in bytes) of the data.
+   *
+   * @return The length in bytes.
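+   *     For example (illustrative): a {@code Slice} constructed over the
+   *     three bytes of {@code "key"} reports {@code size() == 3}.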
+ */ + public int size() { + return size0(getNativeHandle()); + } + + /** + * Return true if the length of the + * data is zero. + * + * @return true if there is no data, false otherwise. + */ + public boolean empty() { + return empty0(getNativeHandle()); + } + + /** + * Creates a string representation of the data + * + * @param hex When true, the representation + * will be encoded in hexadecimal. + * + * @return The string representation of the data. + */ + public String toString(final boolean hex) { + return toString0(getNativeHandle(), hex); + } + + @Override + public String toString() { + return toString(false); + } + + /** + * Three-way key comparison + * + * @param other A slice to compare against + * + * @return Should return either: + * 1) < 0 if this < other + * 2) == 0 if this == other + * 3) > 0 if this > other + */ + public int compare(final AbstractSlice other) { + assert (other != null); + if(!isOwningHandle()) { + return other.isOwningHandle() ? -1 : 0; + } else { + if(!other.isOwningHandle()) { + return 1; + } else { + return compare0(getNativeHandle(), other.getNativeHandle()); + } + } + } + + @Override + public int hashCode() { + return toString().hashCode(); + } + + /** + * If other is a slice object, then + * we defer to {@link #compare(AbstractSlice) compare} + * to check equality, otherwise we return false. + * + * @param other Object to test for equality + * + * @return true when {@code this.compare(other) == 0}, + * false otherwise. + */ + @Override + public boolean equals(final Object other) { + if (other != null && other instanceof AbstractSlice) { + return compare((AbstractSlice)other) == 0; + } else { + return false; + } + } + + /** + * Determines whether this slice starts with + * another slice + * + * @param prefix Another slice which may of may not + * be a prefix of this slice. + * + * @return true when this slice starts with the + * {@code prefix} slice + */ + public boolean startsWith(final AbstractSlice prefix) { + if (prefix != null) { + return startsWith0(getNativeHandle(), prefix.getNativeHandle()); + } else { + return false; + } + } + + protected native static long createNewSliceFromString(final String str); + private native int size0(long handle); + private native boolean empty0(long handle); + private native String toString0(long handle, boolean hex); + private native int compare0(long handle, long otherHandle); + private native boolean startsWith0(long handle, long otherHandle); + + /** + * Deletes underlying C++ slice pointer. + * Note that this function should be called only after all + * RocksDB instances referencing the slice are closed. + * Otherwise an undefined behavior will occur. + */ + @Override + protected final native void disposeInternal(final long handle); + +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java new file mode 100644 index 000000000..c696c3e13 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTableFilter.java @@ -0,0 +1,20 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +/** + * Base class for Table Filters. + */ +public abstract class AbstractTableFilter + extends RocksCallbackObject implements TableFilter { + + protected AbstractTableFilter() { + super(); + } + + @Override + protected long initializeNative(final long... 
nativeParameterHandles) { + return createNewTableFilter(); + } + + private native long createNewTableFilter(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java new file mode 100644 index 000000000..806709b1f --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTraceWriter.java @@ -0,0 +1,70 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Base class for TraceWriters. + */ +public abstract class AbstractTraceWriter + extends RocksCallbackObject implements TraceWriter { + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewTraceWriter(); + } + + /** + * Called from JNI, proxy for {@link TraceWriter#write(Slice)}. + * + * @param sliceHandle the native handle of the slice (which we do not own) + * + * @return short (2 bytes) where the first byte is the + * {@link Status.Code#getValue()} and the second byte is the + * {@link Status.SubCode#getValue()}. + */ + private short writeProxy(final long sliceHandle) { + try { + write(new Slice(sliceHandle)); + return statusToShort(Status.Code.Ok, Status.SubCode.None); + } catch (final RocksDBException e) { + return statusToShort(e.getStatus()); + } + } + + /** + * Called from JNI, proxy for {@link TraceWriter#closeWriter()}. + * + * @return short (2 bytes) where the first byte is the + * {@link Status.Code#getValue()} and the second byte is the + * {@link Status.SubCode#getValue()}. + */ + private short closeWriterProxy() { + try { + closeWriter(); + return statusToShort(Status.Code.Ok, Status.SubCode.None); + } catch (final RocksDBException e) { + return statusToShort(e.getStatus()); + } + } + + private static short statusToShort(/*@Nullable*/ final Status status) { + final Status.Code code = status != null && status.getCode() != null + ? status.getCode() + : Status.Code.IOError; + final Status.SubCode subCode = status != null && status.getSubCode() != null + ? status.getSubCode() + : Status.SubCode.None; + return statusToShort(code, subCode); + } + + private static short statusToShort(final Status.Code code, + final Status.SubCode subCode) { + short result = (short)(code.getValue() << 8); + return (short)(result | subCode.getValue()); + } + + private native long createNewTraceWriter(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java new file mode 100644 index 000000000..cbb49836d --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractTransactionNotifier.java @@ -0,0 +1,54 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
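The statusToShort helpers above pack a status code and sub-code into a single JNI-friendly short: code in the high byte, sub-code in the low byte (AbstractWalFilter below packs its WAL-processing result the same way). A standalone sketch of the arithmetic, with assumed byte values:

    public class StatusPackingSketch {
      public static void main(final String[] args) {
        final byte code = 0x05;    // hypothetical Status.Code byte value
        final byte subCode = 0x02; // hypothetical Status.SubCode byte value
        final short packed = (short) ((code << 8) | subCode);
        assert ((packed >> 8) & 0xFF) == code; // high byte carries the code
        assert (packed & 0xFF) == subCode;     // low byte carries the sub-code
        System.out.printf("packed=0x%04x%n", packed); // prints packed=0x0502
      }
    }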
+ +package org.rocksdb; + +/** + * Provides notification to the caller of SetSnapshotOnNextOperation when + * the actual snapshot gets created + */ +public abstract class AbstractTransactionNotifier + extends RocksCallbackObject { + + protected AbstractTransactionNotifier() { + super(); + } + + /** + * Implement this method to receive notification when a snapshot is + * requested via {@link Transaction#setSnapshotOnNextOperation()}. + * + * @param newSnapshot the snapshot that has been created. + */ + public abstract void snapshotCreated(final Snapshot newSnapshot); + + /** + * This is intentionally private as it is the callback hook + * from JNI + */ + private void snapshotCreated(final long snapshotHandle) { + snapshotCreated(new Snapshot(snapshotHandle)); + } + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewTransactionNotifier(); + } + + private native long createNewTransactionNotifier(); + + /** + * Deletes underlying C++ TransactionNotifier pointer. + * + * Note that this function should be called only after all + * Transactions referencing the comparator are closed. + * Otherwise an undefined behavior will occur. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java new file mode 100644 index 000000000..d525045c6 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWalFilter.java @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Base class for WAL Filters. + */ +public abstract class AbstractWalFilter + extends RocksCallbackObject implements WalFilter { + + @Override + protected long initializeNative(final long... nativeParameterHandles) { + return createNewWalFilter(); + } + + /** + * Called from JNI, proxy for + * {@link WalFilter#logRecordFound(long, String, WriteBatch, WriteBatch)}. + * + * @param logNumber the log handle. + * @param logFileName the log file name + * @param batchHandle the native handle of a WriteBatch (which we do not own) + * @param newBatchHandle the native handle of a + * new WriteBatch (which we do not own) + * + * @return short (2 bytes) where the first byte is the + * {@link WalFilter.LogRecordFoundResult#walProcessingOption} + * {@link WalFilter.LogRecordFoundResult#batchChanged}. + */ + private short logRecordFoundProxy(final long logNumber, + final String logFileName, final long batchHandle, + final long newBatchHandle) { + final LogRecordFoundResult logRecordFoundResult = logRecordFound( + logNumber, logFileName, new WriteBatch(batchHandle), + new WriteBatch(newBatchHandle)); + return logRecordFoundResultToShort(logRecordFoundResult); + } + + private static short logRecordFoundResultToShort( + final LogRecordFoundResult logRecordFoundResult) { + short result = (short)(logRecordFoundResult.walProcessingOption.getValue() << 8); + return (short)(result | (logRecordFoundResult.batchChanged ? 
1 : 0)); + } + + private native long createNewWalFilter(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java new file mode 100644 index 000000000..9527a2fd9 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -0,0 +1,204 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +public abstract class AbstractWriteBatch extends RocksObject + implements WriteBatchInterface { + + protected AbstractWriteBatch(final long nativeHandle) { + super(nativeHandle); + } + + @Override + public int count() { + return count0(nativeHandle_); + } + + @Override + public void put(byte[] key, byte[] value) throws RocksDBException { + put(nativeHandle_, key, key.length, value, value.length); + } + + @Override + public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, + byte[] value) throws RocksDBException { + put(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + @Override + public void merge(byte[] key, byte[] value) throws RocksDBException { + merge(nativeHandle_, key, key.length, value, value.length); + } + + @Override + public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, + byte[] value) throws RocksDBException { + merge(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + @Override + public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBException { + assert key.isDirect() && value.isDirect(); + putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(), + value.remaining(), 0); + key.position(key.limit()); + value.position(value.limit()); + } + + @Override + public void put(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, + final ByteBuffer value) throws RocksDBException { + assert key.isDirect() && value.isDirect(); + putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(), + value.remaining(), columnFamilyHandle.nativeHandle_); + key.position(key.limit()); + value.position(value.limit()); + } + + @Override + public void delete(byte[] key) throws RocksDBException { + delete(nativeHandle_, key, key.length); + } + + @Override + public void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key) + throws RocksDBException { + delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); + } + + @Override + public void delete(final ByteBuffer key) throws RocksDBException { + deleteDirect(nativeHandle_, key, key.position(), key.remaining(), 0); + key.position(key.limit()); + } + + @Override + public void delete(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key) + throws RocksDBException { + deleteDirect( + nativeHandle_, key, key.position(), key.remaining(), columnFamilyHandle.nativeHandle_); + key.position(key.limit()); + } + + @Override + public void singleDelete(byte[] key) throws RocksDBException { + singleDelete(nativeHandle_, key, key.length); + } + + @Override + public void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key) + throws RocksDBException { + singleDelete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); + } + + @Override + public void 
deleteRange(byte[] beginKey, byte[] endKey) + throws RocksDBException { + deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length); + } + + @Override + public void deleteRange(ColumnFamilyHandle columnFamilyHandle, + byte[] beginKey, byte[] endKey) throws RocksDBException { + deleteRange(nativeHandle_, beginKey, beginKey.length, endKey, endKey.length, + columnFamilyHandle.nativeHandle_); + } + + @Override + public void putLogData(byte[] blob) throws RocksDBException { + putLogData(nativeHandle_, blob, blob.length); + } + + @Override + public void clear() { + clear0(nativeHandle_); + } + + @Override + public void setSavePoint() { + setSavePoint0(nativeHandle_); + } + + @Override + public void rollbackToSavePoint() throws RocksDBException { + rollbackToSavePoint0(nativeHandle_); + } + + @Override + public void popSavePoint() throws RocksDBException { + popSavePoint(nativeHandle_); + } + + @Override + public void setMaxBytes(final long maxBytes) { + setMaxBytes(nativeHandle_, maxBytes); + } + + @Override + public WriteBatch getWriteBatch() { + return getWriteBatch(nativeHandle_); + } + + abstract int count0(final long handle); + + abstract void put(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen) throws RocksDBException; + + abstract void put(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen, final long cfHandle) + throws RocksDBException; + + abstract void putDirect(final long handle, final ByteBuffer key, final int keyOffset, + final int keyLength, final ByteBuffer value, final int valueOffset, final int valueLength, + final long cfHandle) throws RocksDBException; + + abstract void merge(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen) throws RocksDBException; + + abstract void merge(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen, final long cfHandle) + throws RocksDBException; + + abstract void delete(final long handle, final byte[] key, + final int keyLen) throws RocksDBException; + + abstract void delete(final long handle, final byte[] key, + final int keyLen, final long cfHandle) throws RocksDBException; + + abstract void singleDelete(final long handle, final byte[] key, final int keyLen) + throws RocksDBException; + + abstract void singleDelete(final long handle, final byte[] key, final int keyLen, + final long cfHandle) throws RocksDBException; + + abstract void deleteDirect(final long handle, final ByteBuffer key, final int keyOffset, + final int keyLength, final long cfHandle) throws RocksDBException; + + abstract void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen, + final byte[] endKey, final int endKeyLen) throws RocksDBException; + + abstract void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen, + final byte[] endKey, final int endKeyLen, final long cfHandle) throws RocksDBException; + + abstract void putLogData(final long handle, final byte[] blob, + final int blobLen) throws RocksDBException; + + abstract void clear0(final long handle); + + abstract void setSavePoint0(final long handle); + + abstract void rollbackToSavePoint0(final long handle); + + abstract void popSavePoint(final long handle) throws RocksDBException; + + abstract void setMaxBytes(final long handle, long maxBytes); + + abstract WriteBatch getWriteBatch(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java 
b/src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java new file mode 100644 index 000000000..877c4ab39 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AccessHint.java @@ -0,0 +1,53 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * File access pattern once a compaction has started + */ +public enum AccessHint { + NONE((byte)0x0), + NORMAL((byte)0x1), + SEQUENTIAL((byte)0x2), + WILLNEED((byte)0x3); + + private final byte value; + + AccessHint(final byte value) { + this.value = value; + } + + /** + *

<p>Returns the byte value of the enumeration's value.</p>
+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + *

<p>Get the AccessHint enumeration value by + * passing the byte identifier to this method.</p>
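A round-trip through that byte mapping, as a sketch to be placed inside any method:

    final AccessHint hint = AccessHint.getAccessHint((byte) 0x2);
    assert hint == AccessHint.SEQUENTIAL; // 0x2 maps to SEQUENTIAL
    assert hint.getValue() == (byte) 0x2; // and back again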
+ * + * @param byteIdentifier of AccessHint. + * + * @return AccessHint instance. + * + * @throws IllegalArgumentException if the access hint for the byteIdentifier + * cannot be found + */ + public static AccessHint getAccessHint(final byte byteIdentifier) { + for (final AccessHint accessHint : AccessHint.values()) { + if (accessHint.getValue() == byteIdentifier) { + return accessHint; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for AccessHint."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java new file mode 100644 index 000000000..5338bc42d --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedColumnFamilyOptionsInterface.java @@ -0,0 +1,464 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * Advanced Column Family Options which are not + * mutable (i.e. present in {@link AdvancedMutableColumnFamilyOptionsInterface} + * + * Taken from include/rocksdb/advanced_options.h + */ +public interface AdvancedColumnFamilyOptionsInterface< + T extends AdvancedColumnFamilyOptionsInterface> { + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check in all of these + * files. Also, an in-memory merge may result in writing lesser + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @param minWriteBufferNumberToMerge the minimum number of write buffers + * that will be merged together. + * @return the reference to the current options. + */ + T setMinWriteBufferNumberToMerge( + int minWriteBufferNumberToMerge); + + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check in all of these + * files. Also, an in-memory merge may result in writing lesser + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @return the minimum number of write buffers that will be merged together. + */ + int minWriteBufferNumberToMerge(); + + /** + * The total maximum number of write buffers to maintain in memory including + * copies of buffers that have already been flushed. Unlike + * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()}, + * this parameter does not affect flushing. + * This controls the minimum amount of write history that will be available + * in memory for conflict checking when Transactions are used. + * + * When using an OptimisticTransactionDB: + * If this value is too low, some transactions may fail at commit time due + * to not being able to determine whether there were any write conflicts. + * + * When using a TransactionDB: + * If Transaction::SetSnapshot is used, TransactionDB will read either + * in-memory write buffers or SST files to do write-conflict checking. 
+ * Increasing this value can reduce the number of reads to SST files + * done for conflict detection. + * + * Setting this value to 0 will cause write buffers to be freed immediately + * after they are flushed. + * If this value is set to -1, + * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()} + * will be used. + * + * Default: + * If using a TransactionDB/OptimisticTransactionDB, the default value will + * be set to the value of + * {@link AdvancedMutableColumnFamilyOptionsInterface#maxWriteBufferNumber()} + * if it is not explicitly set by the user. Otherwise, the default is 0. + * + * @param maxWriteBufferNumberToMaintain The maximum number of write + * buffers to maintain + * + * @return the reference to the current options. + */ + T setMaxWriteBufferNumberToMaintain( + int maxWriteBufferNumberToMaintain); + + /** + * The total maximum number of write buffers to maintain in memory including + * copies of buffers that have already been flushed. + * + * @return maxWriteBufferNumberToMaintain The maximum number of write buffers + * to maintain + */ + int maxWriteBufferNumberToMaintain(); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update inplace the existing_value iff + * * key exists in current memtable + * * new sizeof(new_value) ≤ sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @param inplaceUpdateSupport true if thread-safe inplace updates + * are allowed. + * @return the reference to the current options. + */ + T setInplaceUpdateSupport( + boolean inplaceUpdateSupport); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update inplace the existing_value iff + * * key exists in current memtable + * * new sizeof(new_value) ≤ sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @return true if thread-safe inplace updates are allowed. + */ + boolean inplaceUpdateSupport(); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than number of probes. This option can boost performance + * for in-memory workload but should use with care since it can cause + * higher false positive rate. + * Default: 0 + * + * @param bloomLocality the level of locality of bloom-filter probes. + * @return the reference to the current options. + */ + T setBloomLocality(int bloomLocality); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than number of probes. This option can boost performance + * for in-memory workload but should use with care since it can cause + * higher false positive rate. + * Default: 0 + * + * @return the level of locality of bloom-filter probes. 
+ * @see #setBloomLocality(int) + */ + int bloomLocality(); + + /** + *

<p>Different levels can have different compression + * policies. There are cases where most lower levels + * would like to use quick compression algorithms while + * the higher levels (which have more data) use + * compression algorithms that have better compression + * but could be slower. This array, if non-empty, should + * have an entry for each level of the database; + * these override the value specified in the previous + * field 'compression'.</p> + * + * NOTICE + * <p>If {@code level_compaction_dynamic_level_bytes=true}, + * {@code compression_per_level[0]} still determines {@code L0}, + * but other elements of the array are based on the base level + * (the level {@code L0} files are merged to), and may not + * match the level users see from the info log for metadata.</p> + * <p>If {@code L0} files are merged to {@code level - n}, + * then, for {@code i>0}, {@code compression_per_level[i]} + * determines the compaction type for level {@code n+i-1}.</p> + * + * Example + * <p>For example, if we have 5 levels, and we decide to + * merge {@code L0} data to {@code L4} (which means {@code L1..L3} + * will be empty), then the new files that go to {@code L4} use + * compression type {@code compression_per_level[1]}.</p> + * + * <p>If now {@code L0} is merged to {@code L2}, data going to + * {@code L2} will be compressed according to + * {@code compression_per_level[1]}, {@code L3} using + * {@code compression_per_level[2]} and {@code L4} using + * {@code compression_per_level[3]}. Compaction for each + * level can change as the data grows.</p> + * + * <p>Default: empty</p>
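A configuration sketch for the per-level policy just described, assuming a four-level column family that keeps lower levels cheap to write and compresses the rest harder (the level count and codecs are illustrative choices):

    import java.util.Arrays;
    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.CompressionType;

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setNumLevels(4)
        .setCompressionPerLevel(Arrays.asList(
            CompressionType.NO_COMPRESSION,     // L0
            CompressionType.SNAPPY_COMPRESSION, // L1
            CompressionType.ZSTD_COMPRESSION,   // L2
            CompressionType.ZSTD_COMPRESSION)); // L3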
+ * + * @param compressionLevels list of + * {@link org.rocksdb.CompressionType} instances. + * + * @return the reference to the current options. + */ + T setCompressionPerLevel( + List<CompressionType> compressionLevels); + + /** + * <p>Return the currently set {@link org.rocksdb.CompressionType} + * per level.</p> + * + * <p>See: {@link #setCompressionPerLevel(java.util.List)}</p> + * + * @return list of {@link org.rocksdb.CompressionType} + * instances. + */ + List<CompressionType> compressionPerLevel(); + + /** + * Set the number of levels for this database. + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @param numLevels the number of levels. + * @return the reference to the current options. + */ + T setNumLevels(int numLevels); + + /** + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @return the number of levels. + */ + int numLevels(); + + /** + *

<p>If {@code true}, RocksDB will pick target size of each level + * dynamically. We will pick a base level b >= 1. L0 will be + * directly merged into level b, instead of always into level 1. + * Level 1 to b-1 need to be empty. We try to pick b and its target + * size so that</p> + * + * <ol> + *   <li>target size is in the range of + *     (max_bytes_for_level_base / max_bytes_for_level_multiplier, + *     max_bytes_for_level_base]</li> + *   <li>target size of the last level (level num_levels-1) equals the extra size + *     of the level.</li> + * </ol> + * + * <p>At the same time max_bytes_for_level_multiplier and + * max_bytes_for_level_multiplier_additional are still satisfied.</p> + * + * <p>With this option on, from an empty DB, we make the last level the base + * level, which means merging L0 data into the last level, until it exceeds + * max_bytes_for_level_base. Then we make the second last level the base + * level, and start merging L0 data into the second last level, with its + * target size being {@code 1/max_bytes_for_level_multiplier} of the last + * level's extra size. As the data accumulates further, we move the base level + * to the third last one, and so on.</p> + * + * <p>Example</p> + * + * <p>For example, assume {@code max_bytes_for_level_multiplier=10}, + * {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.</p> + * + * <p>Target sizes of level 1 to 5 start with:</p> + * {@code [- - - - 10MB]} + * <p>with the base level being level 5. Target sizes of level 1 to 4 are not + * applicable because they will not be used. + * Once the size of level 5 grows to more than 10MB, say 11MB, we make + * level 4 the base target and now the targets look like:</p> + * {@code [- - - 1.1MB 11MB]} + * <p>While data are accumulated, size targets are tuned based on actual data + * of level 5. When level 5 has 50MB of data, the targets are:</p> + * {@code [- - - 5MB 50MB]} + * <p>Until level 5's actual size is more than 100MB, say 101MB. Now if we + * keep level 4 as the base level, its target size needs to be 10.1MB, + * which doesn't satisfy the target size range. So now we make level 3 + * the base level and the target sizes of the levels look like:</p> + * {@code [- - 1.01MB 10.1MB 101MB]} + * <p>In the same way, while level 5 further grows, all levels' targets grow, + * like</p> + * {@code [- - 5MB 50MB 500MB]} + * <p>Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the + * base level and make the levels' target sizes like this:</p> + * {@code [- 1.001MB 10.01MB 100.1MB 1001MB]} + * <p>and so on...</p> + * + * <p>By doing this, we give {@code max_bytes_for_level_multiplier} priority + * over {@code max_bytes_for_level_base}, for a more predictable LSM tree + * shape. It is useful to limit worst-case space amplification.</p> + * + * <p>{@code max_bytes_for_level_multiplier_additional} is ignored with + * this flag on.</p> + * + * <p>Turning this feature on or off for an existing DB can cause an unexpected + * LSM tree structure, so it's not recommended.</p> + * + * <p>Caution: this option is experimental</p> + * + * <p>Default: false</p>
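A sketch enabling the option with the same constants as the worked example above; the values are assumptions for illustration, not recommendations:

    import org.rocksdb.ColumnFamilyOptions;

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setLevelCompactionDynamicLevelBytes(true)
        .setMaxBytesForLevelBase(10L * 1024 * 1024) // 10MB, as in the example
        .setMaxBytesForLevelMultiplier(10);         // level targets differ by 10x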
+ * + * @param enableLevelCompactionDynamicLevelBytes boolean value indicating + * if {@code LevelCompactionDynamicLevelBytes} shall be enabled. + * @return the reference to the current options. + */ + @Experimental("Turning this feature on or off for an existing DB can cause" + + " unexpected LSM tree structure so it's not recommended") + T setLevelCompactionDynamicLevelBytes( + boolean enableLevelCompactionDynamicLevelBytes); + + /** + *

<p>Return true if {@code LevelCompactionDynamicLevelBytes} is enabled.</p> + * + * <p>For further information see + * {@link #setLevelCompactionDynamicLevelBytes(boolean)}</p>
+ * + * @return boolean value indicating if + * {@code levelCompactionDynamicLevelBytes} is enabled. + */ + @Experimental("Caution: this option is experimental") + boolean levelCompactionDynamicLevelBytes(); + + /** + * Maximum size of each compaction (not guarantee) + * + * @param maxCompactionBytes the compaction size limit + * @return the reference to the current options. + */ + T setMaxCompactionBytes( + long maxCompactionBytes); + + /** + * Control maximum size of each compaction (not guaranteed) + * + * @return compaction size threshold + */ + long maxCompactionBytes(); + + /** + * Set compaction style for DB. + * + * Default: LEVEL. + * + * @param compactionStyle Compaction style. + * @return the reference to the current options. + */ + ColumnFamilyOptionsInterface setCompactionStyle( + CompactionStyle compactionStyle); + + /** + * Compaction style for DB. + * + * @return Compaction style. + */ + CompactionStyle compactionStyle(); + + /** + * If level {@link #compactionStyle()} == {@link CompactionStyle#LEVEL}, + * for each level, which files are prioritized to be picked to compact. + * + * Default: {@link CompactionPriority#ByCompensatedSize} + * + * @param compactionPriority The compaction priority + * + * @return the reference to the current options. + */ + T setCompactionPriority( + CompactionPriority compactionPriority); + + /** + * Get the Compaction priority if level compaction + * is used for all levels + * + * @return The compaction priority + */ + CompactionPriority compactionPriority(); + + /** + * Set the options needed to support Universal Style compactions + * + * @param compactionOptionsUniversal The Universal Style compaction options + * + * @return the reference to the current options. + */ + T setCompactionOptionsUniversal( + CompactionOptionsUniversal compactionOptionsUniversal); + + /** + * The options needed to support Universal Style compactions + * + * @return The Universal Style compaction options + */ + CompactionOptionsUniversal compactionOptionsUniversal(); + + /** + * The options for FIFO compaction style + * + * @param compactionOptionsFIFO The FIFO compaction options + * + * @return the reference to the current options. + */ + T setCompactionOptionsFIFO( + CompactionOptionsFIFO compactionOptionsFIFO); + + /** + * The options for FIFO compaction style + * + * @return The FIFO compaction options + */ + CompactionOptionsFIFO compactionOptionsFIFO(); + + /** + *

<p>This flag specifies that the implementation should optimize the filters + * mainly for cases where keys are found rather than also optimizing for keys + * missed. This would be used in cases where the application knows that + * there are very few misses, or the performance in the case of misses is not + * important.</p> + * + * <p>For now, this flag allows us to not store filters for the last level, i.e. + * the largest level which contains data of the LSM store. For keys which + * are hits, the filters in this level are not useful because we will search + * for the data anyway.</p> + * + * <p>NOTE: the filters in other levels are still useful + * even for key hits, because they tell us whether to look in that level or go + * to the higher level.</p> + * + * <p>Default: false</p>
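A one-line sketch for a hit-heavy workload where bottommost-level filters would mostly be wasted space:

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setOptimizeFiltersForHits(true); // skip filters on the bottommost level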
+ * + * @param optimizeFiltersForHits boolean value indicating if this flag is set. + * @return the reference to the current options. + */ + T setOptimizeFiltersForHits( + boolean optimizeFiltersForHits); + + /** + *

<p>Returns the current state of the {@code optimize_filters_for_hits} + * setting.</p>
+ * + * @return boolean value indicating if the flag + * {@code optimize_filters_for_hits} was set. + */ + boolean optimizeFiltersForHits(); + + /** + * By default, RocksDB runs consistency checks on the LSM every time the LSM + * changes (Flush, Compaction, AddFile). Use this option if you need to + * disable them. + * + * Default: true + * + * @param forceConsistencyChecks false to disable consistency checks + * + * @return the reference to the current options. + */ + T setForceConsistencyChecks( + boolean forceConsistencyChecks); + + /** + * By default, RocksDB runs consistency checks on the LSM every time the LSM + * changes (Flush, Compaction, AddFile). + * + * @return true if consistency checks are enforced + */ + boolean forceConsistencyChecks(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java new file mode 100644 index 000000000..162d15d80 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/AdvancedMutableColumnFamilyOptionsInterface.java @@ -0,0 +1,830 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Advanced Column Family Options which are mutable + * + * Taken from include/rocksdb/advanced_options.h + * and MutableCFOptions in util/cf_options.h + */ +public interface AdvancedMutableColumnFamilyOptionsInterface< + T extends AdvancedMutableColumnFamilyOptionsInterface> { + /** + * The maximum number of write buffers that are built up in memory. + * The default is 2, so that when 1 write buffer is being flushed to + * storage, new writes can continue to the other write buffer. + * Default: 2 + * + * @param maxWriteBufferNumber maximum number of write buffers. + * @return the instance of the current options. + */ + T setMaxWriteBufferNumber( + int maxWriteBufferNumber); + + /** + * Returns maximum number of write buffers. + * + * @return maximum number of write buffers. + * @see #setMaxWriteBufferNumber(int) + */ + int maxWriteBufferNumber(); + + /** + * Number of locks used for inplace update + * Default: 10000, if inplace_update_support = true, else 0. + * + * @param inplaceUpdateNumLocks the number of locks used for + * inplace updates. + * @return the reference to the current options. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + T setInplaceUpdateNumLocks( + long inplaceUpdateNumLocks); + + /** + * Number of locks used for inplace update + * Default: 10000, if inplace_update_support = true, else 0. + * + * @return the number of locks used for inplace update. + */ + long inplaceUpdateNumLocks(); + + /** + * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, + * create prefix bloom for memtable with the size of + * write_buffer_size * memtable_prefix_bloom_size_ratio. + * If it is larger than 0.25, it is santinized to 0.25. + * + * Default: 0 (disabled) + * + * @param memtablePrefixBloomSizeRatio the ratio of memtable used by the + * bloom filter, 0 means no bloom filter + * @return the reference to the current options. 
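A sketch pairing a fixed-length prefix extractor with a memtable prefix bloom sized at 10% of the write buffer (both the prefix length and the ratio are assumed values):

    import org.rocksdb.ColumnFamilyOptions;

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .useFixedLengthPrefixExtractor(8)      // a prefix extractor must be set
        .setMemtablePrefixBloomSizeRatio(0.1); // 10% of write_buffer_size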
+ */ + T setMemtablePrefixBloomSizeRatio( + double memtablePrefixBloomSizeRatio); + + /** + * if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0, + * create prefix bloom for memtable with the size of + * write_buffer_size * memtable_prefix_bloom_size_ratio. + * If it is larger than 0.25, it is santinized to 0.25. + * + * Default: 0 (disabled) + * + * @return the ratio of memtable used by the bloom filter + */ + double memtablePrefixBloomSizeRatio(); + + /** + * Threshold used in the MemPurge (memtable garbage collection) + * feature. A value of 0.0 corresponds to no MemPurge, + * a value of 1.0 will trigger a MemPurge as often as possible. + * + * Default: 0.0 (disabled) + * + * @param experimentalMempurgeThreshold the threshold used by + * the MemPurge decider. + * @return the reference to the current options. + */ + T setExperimentalMempurgeThreshold(double experimentalMempurgeThreshold); + + /** + * Threshold used in the MemPurge (memtable garbage collection) + * feature. A value of 0.0 corresponds to no MemPurge, + * a value of 1.0 will trigger a MemPurge as often as possible. + * + * Default: 0 (disabled) + * + * @return the threshold used by the MemPurge decider + */ + double experimentalMempurgeThreshold(); + + /** + * Enable whole key bloom filter in memtable. Note this will only take effect + * if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering + * can potentially reduce CPU usage for point-look-ups. + * + * Default: false (disabled) + * + * @param memtableWholeKeyFiltering true if whole key bloom filter is enabled + * in memtable + * @return the reference to the current options. + */ + T setMemtableWholeKeyFiltering(boolean memtableWholeKeyFiltering); + + /** + * Returns whether whole key bloom filter is enabled in memtable + * + * @return true if whole key bloom filter is enabled in memtable + */ + boolean memtableWholeKeyFiltering(); + + /** + * Page size for huge page TLB for bloom in memtable. If ≤ 0, not allocate + * from huge page TLB but from malloc. + * Need to reserve huge pages for it to be allocated. For example: + * sysctl -w vm.nr_hugepages=20 + * See linux doc Documentation/vm/hugetlbpage.txt + * + * @param memtableHugePageSize The page size of the huge + * page tlb + * @return the reference to the current options. + */ + T setMemtableHugePageSize( + long memtableHugePageSize); + + /** + * Page size for huge page TLB for bloom in memtable. If ≤ 0, not allocate + * from huge page TLB but from malloc. + * Need to reserve huge pages for it to be allocated. For example: + * sysctl -w vm.nr_hugepages=20 + * See linux doc Documentation/vm/hugetlbpage.txt + * + * @return The page size of the huge page tlb + */ + long memtableHugePageSize(); + + /** + * The size of one block in arena memory allocation. + * If ≤ 0, a proper value is automatically calculated (usually 1/10 of + * writer_buffer_size). + * + * There are two additional restriction of the specified size: + * (1) size should be in the range of [4096, 2 << 30] and + * (2) be the multiple of the CPU word (which helps with the memory + * alignment). + * + * We'll automatically check and adjust the size number to make sure it + * conforms to the restrictions. + * Default: 0 + * + * @param arenaBlockSize the size of an arena block + * @return the reference to the current options. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. 
+ */ + T setArenaBlockSize(long arenaBlockSize); + + /** + * The size of one block in arena memory allocation. + * If ≤ 0, a proper value is automatically calculated (usually 1/10 of + * writer_buffer_size). + * + * There are two additional restriction of the specified size: + * (1) size should be in the range of [4096, 2 << 30] and + * (2) be the multiple of the CPU word (which helps with the memory + * alignment). + * + * We'll automatically check and adjust the size number to make sure it + * conforms to the restrictions. + * Default: 0 + * + * @return the size of an arena block + */ + long arenaBlockSize(); + + /** + * Soft limit on number of level-0 files. We start slowing down writes at this + * point. A value < 0 means that no writing slow down will be triggered by + * number of files in level-0. + * + * @param level0SlowdownWritesTrigger The soft limit on the number of + * level-0 files + * @return the reference to the current options. + */ + T setLevel0SlowdownWritesTrigger( + int level0SlowdownWritesTrigger); + + /** + * Soft limit on number of level-0 files. We start slowing down writes at this + * point. A value < 0 means that no writing slow down will be triggered by + * number of files in level-0. + * + * @return The soft limit on the number of + * level-0 files + */ + int level0SlowdownWritesTrigger(); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @param level0StopWritesTrigger The maximum number of level-0 files + * @return the reference to the current options. + */ + T setLevel0StopWritesTrigger( + int level0StopWritesTrigger); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @return The maximum number of level-0 files + */ + int level0StopWritesTrigger(); + + /** + * The target file size for compaction. + * This targetFileSizeBase determines a level-1 file size. + * Target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * target_file_size_multiplier is 10, then each file on level-1 will + * be 2MB, and each file on level 2 will be 20MB, + * and each file on level-3 will be 200MB. + * by default targetFileSizeBase is 64MB. + * + * @param targetFileSizeBase the target size of a level-0 file. + * @return the reference to the current options. + * + * @see #setTargetFileSizeMultiplier(int) + */ + T setTargetFileSizeBase( + long targetFileSizeBase); + + /** + * The target file size for compaction. + * This targetFileSizeBase determines a level-1 file size. + * Target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * target_file_size_multiplier is 10, then each file on level-1 will + * be 2MB, and each file on level 2 will be 20MB, + * and each file on level-3 will be 200MB. + * by default targetFileSizeBase is 64MB. + * + * @return the target size of a level-0 file. + * + * @see #targetFileSizeMultiplier() + */ + long targetFileSizeBase(); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-L file and level-(L+1) file. + * By default target_file_size_multiplier is 1, meaning + * files in different levels have the same target. + * + * @param multiplier the size ratio between a level-(L+1) file + * and level-L file. + * @return the reference to the current options. 
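A worked instance of the sizing formula above, using the 2MB base and 10x multiplier from the comment:

    final long targetFileSizeBase = 2L * 1024 * 1024; // 2MB
    final int targetFileSizeMultiplier = 10;
    for (int level = 1; level <= 3; level++) {
      final long target =
          targetFileSizeBase * (long) Math.pow(targetFileSizeMultiplier, level - 1);
      System.out.println("level-" + level + " target: " + target); // 2MB, 20MB, 200MB
    }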
+ */ + T setTargetFileSizeMultiplier( + int multiplier); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-(L+1) file and level-L file. + * By default targetFileSizeMultiplier is 1, meaning + * files in different levels have the same target. + * + * @return the size ratio between a level-(L+1) file and level-L file. + */ + int targetFileSizeMultiplier(); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @param multiplier the ratio between the total size of level-(L+1) + * files and the total size of level-L files for all L. + * @return the reference to the current options. + * + * See {@link MutableColumnFamilyOptionsInterface#setMaxBytesForLevelBase(long)} + */ + T setMaxBytesForLevelMultiplier(double multiplier); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @return the ratio between the total size of level-(L+1) files and + * the total size of level-L files for all L. + * + * See {@link MutableColumnFamilyOptionsInterface#maxBytesForLevelBase()} + */ + double maxBytesForLevelMultiplier(); + + /** + * Different max-size multipliers for different levels. + * These are multiplied by max_bytes_for_level_multiplier to arrive + * at the max-size of each level. + * + * Default: 1 + * + * @param maxBytesForLevelMultiplierAdditional The max-size multipliers + * for each level + * @return the reference to the current options. + */ + T setMaxBytesForLevelMultiplierAdditional( + int[] maxBytesForLevelMultiplierAdditional); + + /** + * Different max-size multipliers for different levels. + * These are multiplied by max_bytes_for_level_multiplier to arrive + * at the max-size of each level. + * + * Default: 1 + * + * @return The max-size multipliers for each level + */ + int[] maxBytesForLevelMultiplierAdditional(); + + /** + * All writes will be slowed down to at least delayed_write_rate if estimated + * bytes needed to be compaction exceed this threshold. + * + * Default: 64GB + * + * @param softPendingCompactionBytesLimit The soft limit to impose on + * compaction + * @return the reference to the current options. + */ + T setSoftPendingCompactionBytesLimit( + long softPendingCompactionBytesLimit); + + /** + * All writes will be slowed down to at least delayed_write_rate if estimated + * bytes needed to be compaction exceed this threshold. + * + * Default: 64GB + * + * @return The soft limit to impose on compaction + */ + long softPendingCompactionBytesLimit(); + + /** + * All writes are stopped if estimated bytes needed to be compaction exceed + * this threshold. + * + * Default: 256GB + * + * @param hardPendingCompactionBytesLimit The hard limit to impose on + * compaction + * @return the reference to the current options. + */ + T setHardPendingCompactionBytesLimit( + long hardPendingCompactionBytesLimit); + + /** + * All writes are stopped if estimated bytes needed to be compaction exceed + * this threshold. + * + * Default: 256GB + * + * @return The hard limit to impose on compaction + */ + long hardPendingCompactionBytesLimit(); + + /** + * An iteration->Next() sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same userkey) that will be sequentially + * skipped before a reseek is issued. 
+ * Default: 8 + * + * @param maxSequentialSkipInIterations the number of keys could + * be skipped in a iteration. + * @return the reference to the current options. + */ + T setMaxSequentialSkipInIterations( + long maxSequentialSkipInIterations); + + /** + * An iteration->Next() sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same userkey) that will be sequentially + * skipped before a reseek is issued. + * Default: 8 + * + * @return the number of keys could be skipped in a iteration. + */ + long maxSequentialSkipInIterations(); + + /** + * Maximum number of successive merge operations on a key in the memtable. + * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @param maxSuccessiveMerges the maximum number of successive merges. + * @return the reference to the current options. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + T setMaxSuccessiveMerges( + long maxSuccessiveMerges); + + /** + * Maximum number of successive merge operations on a key in the memtable. + * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @return the maximum number of successive merges. + */ + long maxSuccessiveMerges(); + + /** + * After writing every SST file, reopen it and read all the keys. + * + * Default: false + * + * @param paranoidFileChecks true to enable paranoid file checks + * @return the reference to the current options. + */ + T setParanoidFileChecks( + boolean paranoidFileChecks); + + /** + * After writing every SST file, reopen it and read all the keys. + * + * Default: false + * + * @return true if paranoid file checks are enabled + */ + boolean paranoidFileChecks(); + + /** + * Measure IO stats in compactions and flushes, if true. + * + * Default: false + * + * @param reportBgIoStats true to enable reporting + * @return the reference to the current options. + */ + T setReportBgIoStats( + boolean reportBgIoStats); + + /** + * Determine whether IO stats in compactions and flushes are being measured + * + * @return true if reporting is enabled + */ + boolean reportBgIoStats(); + + /** + * Non-bottom-level files older than TTL will go through the compaction + * process. This needs {@link MutableDBOptionsInterface#maxOpenFiles()} to be + * set to -1. + * + * Enabled only for level compaction for now. + * + * Default: 0 (disabled) + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param ttl the time-to-live. + * + * @return the reference to the current options. + */ + T setTtl(final long ttl); + + /** + * Get the TTL for Non-bottom-level files that will go through the compaction + * process. + * + * See {@link #setTtl(long)}. + * + * @return the time-to-live. 
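A sketch of enabling TTL-driven compaction; as noted above this requires max_open_files == -1, and the seven-day TTL is an assumed value:

    import org.rocksdb.Options;

    final Options options = new Options()
        .setMaxOpenFiles(-1)        // required for TTL-triggered compaction
        .setTtl(7L * 24 * 60 * 60); // 7 days, in seconds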
+ */ + long ttl(); + + /** + * Files older than this value will be picked up for compaction, and + * re-written to the same level as they were before. + * One main use of the feature is to make sure a file goes through compaction + * filters periodically. Users can also use the feature to clear up SST + * files using old format. + * + * A file's age is computed by looking at file_creation_time or creation_time + * table properties in order, if they have valid non-zero values; if not, the + * age is based on the file's last modified time (given by the underlying + * Env). + * + * Supported in Level and FIFO compaction. + * In FIFO compaction, this option has the same meaning as TTL and whichever + * stricter will be used. + * Pre-req: max_open_file == -1. + * unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60 + * + * Values: + * 0: Turn off Periodic compactions. + * UINT64_MAX - 1 (i.e 0xfffffffffffffffe): Let RocksDB control this feature + * as needed. For now, RocksDB will change this value to 30 days + * (i.e 30 * 24 * 60 * 60) so that every file goes through the compaction + * process at least once every 30 days if not compacted sooner. + * In FIFO compaction, since the option has the same meaning as ttl, + * when this value is left default, and ttl is left to 0, 30 days will be + * used. Otherwise, min(ttl, periodic_compaction_seconds) will be used. + * + * Default: 0xfffffffffffffffe (allow RocksDB to auto-tune) + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param periodicCompactionSeconds the periodic compaction in seconds. + * + * @return the reference to the current options. + */ + T setPeriodicCompactionSeconds(final long periodicCompactionSeconds); + + /** + * Get the periodicCompactionSeconds. + * + * See {@link #setPeriodicCompactionSeconds(long)}. + * + * @return the periodic compaction in seconds. + */ + long periodicCompactionSeconds(); + + // + // BEGIN options for blobs (integrated BlobDB) + // + + /** + * When set, large values (blobs) are written to separate blob files, and only + * pointers to them are stored in SST files. This can reduce write amplification + * for large-value use cases at the cost of introducing a level of indirection + * for reads. See also the options min_blob_size, blob_file_size, + * blob_compression_type, enable_blob_garbage_collection, and + * blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param enableBlobFiles true iff blob files should be enabled + * + * @return the reference to the current options. + */ + T setEnableBlobFiles(final boolean enableBlobFiles); + + /** + * When set, large values (blobs) are written to separate blob files, and only + * pointers to them are stored in SST files. This can reduce write amplification + * for large-value use cases at the cost of introducing a level of indirection + * for reads. See also the options min_blob_size, blob_file_size, + * blob_compression_type, enable_blob_garbage_collection, and + * blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @return true if blob files are enabled + */ + boolean enableBlobFiles(); + + /** + * Set the size of the smallest value to be stored separately in a blob file. 
Values + * which have an uncompressed size smaller than this threshold are stored + * alongside the keys in SST files in the usual fashion. A value of zero for + * this option means that all values are stored in blob files. Note that + * enable_blob_files has to be set in order for this option to have any effect. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param minBlobSize the size of the smallest value to be stored separately in a blob file + * @return the reference to the current options. + */ + T setMinBlobSize(final long minBlobSize); + + /** + * Get the size of the smallest value to be stored separately in a blob file. Values + * which have an uncompressed size smaller than this threshold are stored + * alongside the keys in SST files in the usual fashion. A value of zero for + * this option means that all values are stored in blob files. Note that + * enable_blob_files has to be set in order for this option to have any effect. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @return the current minimum size of value which is stored separately in a blob + */ + long minBlobSize(); + + /** + * Set the size limit for blob files. When writing blob files, a new file is opened + * once this limit is reached. Note that enable_blob_files has to be set in + * order for this option to have any effect. + * + * Default: 256 MB + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param blobFileSize the size limit for blob files + * + * @return the reference to the current options. + */ + T setBlobFileSize(final long blobFileSize); + + /** + * The size limit for blob files. When writing blob files, a new file is opened + * once this limit is reached. + * + * @return the current size limit for blob files + */ + long blobFileSize(); + + /** + * Set the compression algorithm to use for large values stored in blob files. Note + * that enable_blob_files has to be set in order for this option to have any + * effect. + * + * Default: no compression + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param compressionType the compression algorithm to use. + * + * @return the reference to the current options. + */ + T setBlobCompressionType(CompressionType compressionType); + + /** + * Get the compression algorithm in use for large values stored in blob files. + * Note that enable_blob_files has to be set in order for this option to have any + * effect. + * + * @return the current compression algorithm + */ + CompressionType blobCompressionType(); + + /** + * Enable/disable garbage collection of blobs. Blob GC is performed as part of + * compaction. Valid blobs residing in blob files older than a cutoff get + * relocated to new files as they are encountered during compaction, which makes + * it possible to clean up blob files once they contain nothing but + * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * @param enableBlobGarbageCollection the new enabled/disabled state of blob garbage collection + * + * @return the reference to the current options. 
+ */ + T setEnableBlobGarbageCollection(final boolean enableBlobGarbageCollection); + + /** + * Query whether garbage collection of blobs is enabled. Blob GC is performed as part of + * compaction. Valid blobs residing in blob files older than a cutoff get + * relocated to new files as they are encountered during compaction, which makes + * it possible to clean up blob files once they contain nothing but + * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * @return true if blob garbage collection is currently enabled. + */ + boolean enableBlobGarbageCollection(); + + /** + * Set cutoff in terms of blob file age for garbage collection. Blobs in the + * oldest N blob files will be relocated when encountered during compaction, + * where N = garbage_collection_cutoff * number_of_blob_files. Note that + * enable_blob_garbage_collection has to be set in order for this option to have + * any effect. + * + * Default: 0.25 + * + * @param blobGarbageCollectionAgeCutoff the new age cutoff + * + * @return the reference to the current options. + */ + T setBlobGarbageCollectionAgeCutoff(double blobGarbageCollectionAgeCutoff); + + /** + * Get cutoff in terms of blob file age for garbage collection. Blobs in the + * oldest N blob files will be relocated when encountered during compaction, + * where N = garbage_collection_cutoff * number_of_blob_files. Note that + * enable_blob_garbage_collection has to be set in order for this option to have + * any effect. + * + * Default: 0.25 + * + * @return the current age cutoff for garbage collection + */ + double blobGarbageCollectionAgeCutoff(); + + /** + * If the ratio of garbage in the oldest blob files exceeds this threshold, + * targeted compactions are scheduled in order to force garbage collecting + * the blob files in question, assuming they are all eligible based on the + * value of {@link #blobGarbageCollectionAgeCutoff} above. This option is + * currently only supported with leveled compactions. + * + * Note that {@link #enableBlobGarbageCollection} has to be set in order for this + * option to have any effect. + * + * Default: 1.0 + * + * Dynamically changeable through the SetOptions() API + * + * @param blobGarbageCollectionForceThreshold new value for the threshold + * @return the reference to the current options + */ + T setBlobGarbageCollectionForceThreshold(double blobGarbageCollectionForceThreshold); + + /** + * Get the current value for the {@link #blobGarbageCollectionForceThreshold} + * @return the current threshold at which garbage collection of blobs is forced + */ + double blobGarbageCollectionForceThreshold(); + + /** + * Set compaction readahead for blob files. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param blobCompactionReadaheadSize the compaction readahead for blob files + * + * @return the reference to the current options. + */ + T setBlobCompactionReadaheadSize(final long blobCompactionReadaheadSize); + + /** + * Get compaction readahead for blob files. + * + * @return the current compaction readahead for blob files + */ + long blobCompactionReadaheadSize(); + + /** + * Set a certain LSM tree level to enable blob files. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}.
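Taken together, the blob options above are typically set on the concrete ColumnFamilyOptions class that implements this interface. A hedged sketch, with sizes that are illustrative rather than recommended:

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setEnableBlobFiles(true)                 // store large values in blob files
        .setMinBlobSize(4L * 1024)                // values >= 4 KiB are separated
        .setBlobFileSize(256L * 1024 * 1024)      // roll blob files at 256 MB
        .setBlobCompressionType(CompressionType.LZ4_COMPRESSION)
        .setEnableBlobGarbageCollection(true)
        .setBlobGarbageCollectionAgeCutoff(0.25); // GC the oldest quarter of blob files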
+ * + * @param blobFileStartingLevel the starting level to enable blob files + * + * @return the reference to the current options. + */ + T setBlobFileStartingLevel(final int blobFileStartingLevel); + + /** + * Get the starting LSM tree level to enable blob files. + * + * Default: 0 + * + * @return the current LSM tree level to enable blob files. + */ + int blobFileStartingLevel(); + + /** + * Set a certain prepopulate blob cache option. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param prepopulateBlobCache the prepopulate blob cache option + * + * @return the reference to the current options. + */ + T setPrepopulateBlobCache(final PrepopulateBlobCache prepopulateBlobCache); + + /** + * Get the prepopulate blob cache option. + * + * Default: 0 + * + * @return the current prepopulate blob cache option. + */ + PrepopulateBlobCache prepopulateBlobCache(); + + // + // END options for blobs (integrated BlobDB) + // +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BackgroundErrorReason.java b/src/rocksdb/java/src/main/java/org/rocksdb/BackgroundErrorReason.java new file mode 100644 index 000000000..eec593d35 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/BackgroundErrorReason.java @@ -0,0 +1,46 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum BackgroundErrorReason { + FLUSH((byte) 0x0), + COMPACTION((byte) 0x1), + WRITE_CALLBACK((byte) 0x2), + MEMTABLE((byte) 0x3); + + private final byte value; + + BackgroundErrorReason(final byte value) { + this.value = value; + } + + /** + * Get the internal representation. + * + * @return the internal representation + */ + byte getValue() { + return value; + } + + /** + * Get the BackgroundErrorReason from the internal representation value. + * + * @return the background error reason. + * + * @throws IllegalArgumentException if the value is unknown. + */ + static BackgroundErrorReason fromValue(final byte value) { + for (final BackgroundErrorReason backgroundErrorReason : BackgroundErrorReason.values()) { + if (backgroundErrorReason.value == value) { + return backgroundErrorReason; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for BackgroundErrorReason: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java b/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java new file mode 100644 index 000000000..515824a91 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java @@ -0,0 +1,259 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import java.util.List; + +/** + * BackupEngine allows you to backup + * and restore the database + * + * Be aware, that `new BackupEngine` takes time proportional to the amount + * of backups. So if you have a slow filesystem to backup + * and you have a lot of backups then restoring can take some time. + * That's why we recommend to limit the number of backups. 
+ * Also we recommend to keep BackupEngine alive and not to recreate it every + * time you need to do a backup. + */ +public class BackupEngine extends RocksObject implements AutoCloseable { + + protected BackupEngine(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Opens a new Backup Engine + * + * @param env The environment that the backup engine should operate within + * @param options Any options for the backup engine + * + * @return A new BackupEngine instance + * @throws RocksDBException thrown if the backup engine could not be opened + */ + public static BackupEngine open(final Env env, final BackupEngineOptions options) + throws RocksDBException { + return new BackupEngine(open(env.nativeHandle_, options.nativeHandle_)); + } + + /** + * Captures the state of the database in the latest backup + * + * Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with + * the flushBeforeBackup parameter set to false + * + * @param db The database to backup + * + * Note - This method is not thread safe + * + * @throws RocksDBException thrown if a new backup could not be created + */ + public void createNewBackup(final RocksDB db) throws RocksDBException { + createNewBackup(db, false); + } + + /** + * Captures the state of the database in the latest backup + * + * @param db The database to backup + * @param flushBeforeBackup When true, the Backup Engine will first issue a + * memtable flush and only then copy the DB files to + * the backup directory. Doing so will prevent log + * files from being copied to the backup directory + * (since flush will delete them). + * When false, the Backup Engine will not issue a + * flush before starting the backup. In that case, + * the backup will also include log files + * corresponding to live memtables. If writes have + * been performed with the write ahead log disabled, + * set flushBeforeBackup to true to prevent those + * writes from being lost. Otherwise, the backup will + * always be consistent with the current state of the + * database regardless of the flushBeforeBackup + * parameter. + * + * Note - This method is not thread safe + * + * @throws RocksDBException thrown if a new backup could not be created + */ + public void createNewBackup( + final RocksDB db, final boolean flushBeforeBackup) + throws RocksDBException { + assert (isOwningHandle()); + createNewBackup(nativeHandle_, db.nativeHandle_, flushBeforeBackup); + } + + /** + * Captures the state of the database in the latest backup along with + * application specific metadata. + * + * @param db The database to backup + * @param metadata Application metadata + * @param flushBeforeBackup When true, the Backup Engine will first issue a + * memtable flush and only then copy the DB files to + * the backup directory. Doing so will prevent log + * files from being copied to the backup directory + * (since flush will delete them). + * When false, the Backup Engine will not issue a + * flush before starting the backup. In that case, + * the backup will also include log files + * corresponding to live memtables. If writes have + * been performed with the write ahead log disabled, + * set flushBeforeBackup to true to prevent those + * writes from being lost. Otherwise, the backup will + * always be consistent with the current state of the + * database regardless of the flushBeforeBackup + * parameter. 
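A minimal round-trip sketch for the methods documented here (the backup directory is hypothetical, `db` is an already-open RocksDB instance, and the enclosing method is assumed to declare throws RocksDBException):

    try (final BackupEngineOptions backupOptions =
             new BackupEngineOptions("/var/backups/rocksdb");
         final BackupEngine backupEngine =
             BackupEngine.open(Env.getDefault(), backupOptions)) {
      backupEngine.createNewBackup(db, true); // flush first, so WAL files need not be copied
      backupEngine.purgeOldBackups(5);        // keep only the five most recent backups
    } // native handles are released when the try block exits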
+ * + * Note - This method is not thread safe + * + * @throws RocksDBException thrown if a new backup could not be created + */ + public void createNewBackupWithMetadata(final RocksDB db, final String metadata, + final boolean flushBeforeBackup) throws RocksDBException { + assert (isOwningHandle()); + createNewBackupWithMetadata(nativeHandle_, db.nativeHandle_, metadata, flushBeforeBackup); + } + + /** + * Gets information about the available + * backups. + * + * @return A list of information about each available backup + */ + public List<BackupInfo> getBackupInfo() { + assert (isOwningHandle()); + return getBackupInfo(nativeHandle_); + } + + /** + *
<p>Returns a list of corrupted backup ids. If there + * is no corrupted backup the method will return an + * empty list.</p>
+ * + * @return array of backup ids as int ids. + */ + public int[] getCorruptedBackups() { + assert(isOwningHandle()); + return getCorruptedBackups(nativeHandle_); + } + + /** + *
<p>Will delete all the files we don't need anymore. It will + * do the full scan of the files/ directory and delete all the + * files that are not referenced.</p>
+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void garbageCollect() throws RocksDBException { + assert(isOwningHandle()); + garbageCollect(nativeHandle_); + } + + /** + * Deletes old backups, keeping just the latest numBackupsToKeep + * + * @param numBackupsToKeep The latest n backups to keep + * + * @throws RocksDBException thrown if the old backups could not be deleted + */ + public void purgeOldBackups( + final int numBackupsToKeep) throws RocksDBException { + assert (isOwningHandle()); + purgeOldBackups(nativeHandle_, numBackupsToKeep); + } + + /** + * Deletes a backup + * + * @param backupId The id of the backup to delete + * + * @throws RocksDBException thrown if the backup could not be deleted + */ + public void deleteBackup(final int backupId) throws RocksDBException { + assert (isOwningHandle()); + deleteBackup(nativeHandle_, backupId); + } + + /** + * Restore the database from a backup + * + * IMPORTANT: if options.share_table_files == true and you restore the DB + * from some backup that is not the latest, and you start creating new + * backups from the new DB, they will probably fail! + * + * Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. + * If you add new data to the DB and try creating a new backup now, the + * database will diverge from backups 4 and 5 and the new backup will fail. + * If you want to create new backup, you will first have to delete backups 4 + * and 5. + * + * @param backupId The id of the backup to restore + * @param dbDir The directory to restore the backup to, i.e. where your + * database is + * @param walDir The location of the log files for your database, + * often the same as dbDir + * @param restoreOptions Options for controlling the restore + * + * @throws RocksDBException thrown if the database could not be restored + */ + public void restoreDbFromBackup( + final int backupId, final String dbDir, final String walDir, + final RestoreOptions restoreOptions) throws RocksDBException { + assert (isOwningHandle()); + restoreDbFromBackup(nativeHandle_, backupId, dbDir, walDir, + restoreOptions.nativeHandle_); + } + + /** + * Restore the database from the latest backup + * + * @param dbDir The directory to restore the backup to, i.e. 
where your + * database is + * @param walDir The location of the log files for your database, often the + * same as dbDir + * @param restoreOptions Options for controlling the restore + * + * @throws RocksDBException thrown if the database could not be restored + */ + public void restoreDbFromLatestBackup( + final String dbDir, final String walDir, + final RestoreOptions restoreOptions) throws RocksDBException { + assert (isOwningHandle()); + restoreDbFromLatestBackup(nativeHandle_, dbDir, walDir, + restoreOptions.nativeHandle_); + } + + private native static long open(final long env, final long backupEngineOptions) + throws RocksDBException; + + private native void createNewBackup(final long handle, final long dbHandle, + final boolean flushBeforeBackup) throws RocksDBException; + + private native void createNewBackupWithMetadata(final long handle, final long dbHandle, + final String metadata, final boolean flushBeforeBackup) throws RocksDBException; + + private native List getBackupInfo(final long handle); + + private native int[] getCorruptedBackups(final long handle); + + private native void garbageCollect(final long handle) throws RocksDBException; + + private native void purgeOldBackups(final long handle, + final int numBackupsToKeep) throws RocksDBException; + + private native void deleteBackup(final long handle, final int backupId) + throws RocksDBException; + + private native void restoreDbFromBackup(final long handle, final int backupId, + final String dbDir, final String walDir, final long restoreOptionsHandle) + throws RocksDBException; + + private native void restoreDbFromLatestBackup(final long handle, + final String dbDir, final String walDir, final long restoreOptionsHandle) + throws RocksDBException; + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngineOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngineOptions.java new file mode 100644 index 000000000..6e2dacc02 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/BackupEngineOptions.java @@ -0,0 +1,458 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.io.File; + +/** + *
<p>BackupEngineOptions controls the behavior of a + * {@link org.rocksdb.BackupEngine}. + * </p> + * <p>Note that dispose() must be called before an Options instance + * becomes out-of-scope to release the allocated memory in C++.</p>
+ * + * @see org.rocksdb.BackupEngine + */ +public class BackupEngineOptions extends RocksObject { + private Env backupEnv = null; + private Logger infoLog = null; + private RateLimiter backupRateLimiter = null; + private RateLimiter restoreRateLimiter = null; + + /** + *
<p>BackupEngineOptions constructor.</p>
+ * + * @param path Where to keep the backup files. Has to be different than db + * name. Best to set this to {@code db name_ + "/backups"} + * @throws java.lang.IllegalArgumentException if illegal path is used. + */ + public BackupEngineOptions(final String path) { + super(newBackupEngineOptions(ensureWritableFile(path))); + } + + private static String ensureWritableFile(final String path) { + final File backupPath = path == null ? null : new File(path); + if (backupPath == null || !backupPath.isDirectory() || + !backupPath.canWrite()) { + throw new IllegalArgumentException("Illegal path provided."); + } else { + return path; + } + } + + /** + *
<p>Returns the path to the BackupEngine directory.</p>
+ * + * @return the path to the BackupEngine directory. + */ + public String backupDir() { + assert(isOwningHandle()); + return backupDir(nativeHandle_); + } + + /** + * Backup Env object. It will be used for backup file I/O. If it's + * null, backups will be written out using DBs Env. Otherwise + * backup's I/O will be performed using this object. + * + * Default: null + * + * @param env The environment to use + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setBackupEnv(final Env env) { + assert(isOwningHandle()); + setBackupEnv(nativeHandle_, env.nativeHandle_); + this.backupEnv = env; + return this; + } + + /** + * Backup Env object. It will be used for backup file I/O. If it's + * null, backups will be written out using DBs Env. Otherwise + * backup's I/O will be performed using this object. + * + * Default: null + * + * @return The environment in use + */ + public Env backupEnv() { + return this.backupEnv; + } + + /** + *
<p>Share table files between backups.</p> + * + * @param shareTableFiles If {@code share_table_files == true}, backup will + * assume that table files with same name have the same contents. This + * enables incremental backups and avoids unnecessary data copies. If + * {@code share_table_files == false}, each backup will be on its own and + * will not share any data with other backups. + * + * <p>Default: true</p>
+ * + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setShareTableFiles(final boolean shareTableFiles) { + assert(isOwningHandle()); + setShareTableFiles(nativeHandle_, shareTableFiles); + return this; + } + + /** + *
<p>Share table files between backups.</p>
+ * + * @return boolean value indicating if SST files will be shared between + * backups. + */ + public boolean shareTableFiles() { + assert(isOwningHandle()); + return shareTableFiles(nativeHandle_); + } + + /** + * Set the logger to use for Backup info and error messages + * + * @param logger The logger to use for the backup + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setInfoLog(final Logger logger) { + assert(isOwningHandle()); + setInfoLog(nativeHandle_, logger.nativeHandle_); + this.infoLog = logger; + return this; + } + + /** + * Set the logger to use for Backup info and error messages + * + * Default: null + * + * @return The logger in use for the backup + */ + public Logger infoLog() { + return this.infoLog; + } + + /** + *
<p>Set synchronous backups.</p> + * + * @param sync If {@code sync == true}, we can guarantee you'll get consistent + * backup even on a machine crash/reboot. Backup process is slower with sync + * enabled. If {@code sync == false}, we don't guarantee anything on machine + * reboot. However, chances are some of the backups are consistent. + * + * <p>Default: true</p>
+ * + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setSync(final boolean sync) { + assert(isOwningHandle()); + setSync(nativeHandle_, sync); + return this; + } + + /** + *
<p>Whether synchronous backups are activated.</p>
+ * + * @return boolean value if synchronous backups are configured. + */ + public boolean sync() { + assert(isOwningHandle()); + return sync(nativeHandle_); + } + + /** + *
<p>Set if old data will be destroyed.</p> + * + * @param destroyOldData If true, it will delete whatever backups there are + * already. + * + * <p>Default: false</p>
+ * + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setDestroyOldData(final boolean destroyOldData) { + assert(isOwningHandle()); + setDestroyOldData(nativeHandle_, destroyOldData); + return this; + } + + /** + *
<p>Returns if old data will be destroyed when performing new backups.</p>
+ * + * @return boolean value indicating if old data will be destroyed. + */ + public boolean destroyOldData() { + assert(isOwningHandle()); + return destroyOldData(nativeHandle_); + } + + /** + *
<p>Set if log files shall be persisted.</p> + * + * @param backupLogFiles If false, we won't backup log files. This option can + * be useful for backing up in-memory databases where log files are + * persisted, but table files are in memory. + * + * <p>Default: true</p>
+ * + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setBackupLogFiles(final boolean backupLogFiles) { + assert(isOwningHandle()); + setBackupLogFiles(nativeHandle_, backupLogFiles); + return this; + } + + /** + *
<p>Returns whether log files shall be persisted.</p>
+ * + * @return boolean value indicating if log files will be persisted. + */ + public boolean backupLogFiles() { + assert(isOwningHandle()); + return backupLogFiles(nativeHandle_); + } + + /** + *
<p>Set backup rate limit.</p> + * + * @param backupRateLimit Max bytes that can be transferred in a second during + * backup. If 0 or negative, then go as fast as you can. + * + * <p>Default: 0</p>
+ * + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setBackupRateLimit(long backupRateLimit) { + assert(isOwningHandle()); + backupRateLimit = (backupRateLimit <= 0) ? 0 : backupRateLimit; + setBackupRateLimit(nativeHandle_, backupRateLimit); + return this; + } + + /** + *
<p>Return backup rate limit which describes the max bytes that can be + * transferred in a second during backup.</p>
+ * + * @return numerical value describing the backup transfer limit in bytes per + * second. + */ + public long backupRateLimit() { + assert(isOwningHandle()); + return backupRateLimit(nativeHandle_); + } + + /** + * Backup rate limiter. Used to control transfer speed for backup. If this is + * not null, {@link #backupRateLimit()} is ignored. + * + * Default: null + * + * @param backupRateLimiter The rate limiter to use for the backup + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setBackupRateLimiter(final RateLimiter backupRateLimiter) { + assert(isOwningHandle()); + setBackupRateLimiter(nativeHandle_, backupRateLimiter.nativeHandle_); + this.backupRateLimiter = backupRateLimiter; + return this; + } + + /** + * Backup rate limiter. Used to control transfer speed for backup. If this is + * not null, {@link #backupRateLimit()} is ignored. + * + * Default: null + * + * @return The rate limiter in use for the backup + */ + public RateLimiter backupRateLimiter() { + assert(isOwningHandle()); + return this.backupRateLimiter; + } + + /** + *
<p>Set restore rate limit.</p> + * + * @param restoreRateLimit Max bytes that can be transferred in a second + * during restore. If 0 or negative, then go as fast as you can. + * + * <p>Default: 0</p>
+ * + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setRestoreRateLimit(long restoreRateLimit) { + assert(isOwningHandle()); + restoreRateLimit = (restoreRateLimit <= 0) ? 0 : restoreRateLimit; + setRestoreRateLimit(nativeHandle_, restoreRateLimit); + return this; + } + + /** + *
<p>Return restore rate limit which describes the max bytes that can be + * transferred in a second during restore.</p>
+ * + * @return numerical value describing the restore transfer limit in bytes per + * second. + */ + public long restoreRateLimit() { + assert(isOwningHandle()); + return restoreRateLimit(nativeHandle_); + } + + /** + * Restore rate limiter. Used to control transfer speed during restore. If + * this is not null, {@link #restoreRateLimit()} is ignored. + * + * Default: null + * + * @param restoreRateLimiter The rate limiter to use during restore + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setRestoreRateLimiter(final RateLimiter restoreRateLimiter) { + assert(isOwningHandle()); + setRestoreRateLimiter(nativeHandle_, restoreRateLimiter.nativeHandle_); + this.restoreRateLimiter = restoreRateLimiter; + return this; + } + + /** + * Restore rate limiter. Used to control transfer speed during restore. If + * this is not null, {@link #restoreRateLimit()} is ignored. + * + * Default: null + * + * @return The rate limiter in use during restore + */ + public RateLimiter restoreRateLimiter() { + assert(isOwningHandle()); + return this.restoreRateLimiter; + } + + /** + *
<p>Only used if share_table_files is set to true. If true, will consider + * that backups can come from different databases, hence an SST file is not uniquely + * identified by its name, but by the triple (file name, crc32, file length). + * </p> + * + * @param shareFilesWithChecksum boolean value indicating if SST files are + * stored using the triple (file name, crc32, file length) and not its name. + * + * <p>Note: this is an experimental option; you'll need to set it manually. + * Turn it on only if you know what you're doing.</p> + * + * <p>Default: false</p>
+ * + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setShareFilesWithChecksum(final boolean shareFilesWithChecksum) { + assert(isOwningHandle()); + setShareFilesWithChecksum(nativeHandle_, shareFilesWithChecksum); + return this; + } + + /** + *
<p>Return whether share files with checksum is active.</p>
+ * + * @return boolean value indicating if share files with checksum + * is active. + */ + public boolean shareFilesWithChecksum() { + assert(isOwningHandle()); + return shareFilesWithChecksum(nativeHandle_); + } + + /** + * Up to this many background threads will copy files for + * {@link BackupEngine#createNewBackup(RocksDB, boolean)} and + * {@link BackupEngine#restoreDbFromBackup(int, String, String, RestoreOptions)} + * + * Default: 1 + * + * @param maxBackgroundOperations The maximum number of background threads + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setMaxBackgroundOperations(final int maxBackgroundOperations) { + assert(isOwningHandle()); + setMaxBackgroundOperations(nativeHandle_, maxBackgroundOperations); + return this; + } + + /** + * Up to this many background threads will copy files for + * {@link BackupEngine#createNewBackup(RocksDB, boolean)} and + * {@link BackupEngine#restoreDbFromBackup(int, String, String, RestoreOptions)} + * + * Default: 1 + * + * @return The maximum number of background threads + */ + public int maxBackgroundOperations() { + assert(isOwningHandle()); + return maxBackgroundOperations(nativeHandle_); + } + + /** + * During backup user can get callback every time next + * {@link #callbackTriggerIntervalSize()} bytes being copied. + * + * Default: 4194304 + * + * @param callbackTriggerIntervalSize The interval size for the + * callback trigger + * @return instance of current BackupEngineOptions. + */ + public BackupEngineOptions setCallbackTriggerIntervalSize( + final long callbackTriggerIntervalSize) { + assert(isOwningHandle()); + setCallbackTriggerIntervalSize(nativeHandle_, callbackTriggerIntervalSize); + return this; + } + + /** + * During backup user can get callback every time next + * {@link #callbackTriggerIntervalSize()} bytes being copied. 
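Pulling several of these options together, a hedged configuration sketch (the directory, rate limit, and thread count are illustrative):

    final BackupEngineOptions backupOptions =
        new BackupEngineOptions("/var/backups/rocksdb")
            .setShareTableFiles(true)               // enables incremental backups
            .setSync(true)                          // consistent even across crash/reboot
            .setBackupRateLimit(10L * 1024 * 1024)  // throttle to ~10 MB/s during backup
            .setMaxBackgroundOperations(4);         // copy files on four threads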
+ * + * Default: 4194304 + * + * @return The interval size for the callback trigger + */ + public long callbackTriggerIntervalSize() { + assert(isOwningHandle()); + return callbackTriggerIntervalSize(nativeHandle_); + } + + private native static long newBackupEngineOptions(final String path); + private native String backupDir(long handle); + private native void setBackupEnv(final long handle, final long envHandle); + private native void setShareTableFiles(long handle, boolean flag); + private native boolean shareTableFiles(long handle); + private native void setInfoLog(final long handle, final long infoLogHandle); + private native void setSync(long handle, boolean flag); + private native boolean sync(long handle); + private native void setDestroyOldData(long handle, boolean flag); + private native boolean destroyOldData(long handle); + private native void setBackupLogFiles(long handle, boolean flag); + private native boolean backupLogFiles(long handle); + private native void setBackupRateLimit(long handle, long rateLimit); + private native long backupRateLimit(long handle); + private native void setBackupRateLimiter(long handle, long rateLimiterHandle); + private native void setRestoreRateLimit(long handle, long rateLimit); + private native long restoreRateLimit(long handle); + private native void setRestoreRateLimiter(final long handle, + final long rateLimiterHandle); + private native void setShareFilesWithChecksum(long handle, boolean flag); + private native boolean shareFilesWithChecksum(long handle); + private native void setMaxBackgroundOperations(final long handle, + final int maxBackgroundOperations); + private native int maxBackgroundOperations(final long handle); + private native void setCallbackTriggerIntervalSize(final long handle, + long callbackTriggerIntervalSize); + private native long callbackTriggerIntervalSize(final long handle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java new file mode 100644 index 000000000..9244e4eb1 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java @@ -0,0 +1,76 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * Instances of this class describe a Backup made by + * {@link org.rocksdb.BackupEngine}. + */ +public class BackupInfo { + + /** + * Package private constructor used to create instances + * of BackupInfo by {@link org.rocksdb.BackupEngine} + * + * @param backupId id of backup + * @param timestamp timestamp of backup + * @param size size of backup + * @param numberFiles number of files related to this backup. + */ + BackupInfo(final int backupId, final long timestamp, final long size, final int numberFiles, + final String app_metadata) { + backupId_ = backupId; + timestamp_ = timestamp; + size_ = size; + numberFiles_ = numberFiles; + app_metadata_ = app_metadata; + } + + /** + * + * @return the backup id. + */ + public int backupId() { + return backupId_; + } + + /** + * + * @return the timestamp of the backup. 
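In practice these getters are consumed by iterating BackupEngine#getBackupInfo(); a brief sketch, assuming an open `backupEngine`:

    for (final BackupInfo info : backupEngine.getBackupInfo()) {
      System.out.printf("backup %d: %d bytes in %d files, metadata=%s%n",
          info.backupId(), info.size(), info.numberFiles(), info.appMetadata());
    }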
+ */ + public long timestamp() { + return timestamp_; + } + + /** + * + * @return the size of the backup + */ + public long size() { + return size_; + } + + /** + * + * @return the number of files of this backup. + */ + public int numberFiles() { + return numberFiles_; + } + + /** + * + * @return the associated application metadata, or null + */ + public String appMetadata() { + return app_metadata_; + } + + private int backupId_; + private long timestamp_; + private long size_; + private int numberFiles_; + private String app_metadata_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java new file mode 100644 index 000000000..0404fc620 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -0,0 +1,1055 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * The config for the block-based table SST format. + * + * BlockBasedTable is RocksDB's default SST file format. + */ +//TODO(AR) should be renamed BlockBasedTableOptions +public class BlockBasedTableConfig extends TableFormatConfig { + + public BlockBasedTableConfig() { + //TODO(AR) flushBlockPolicyFactory + cacheIndexAndFilterBlocks = false; + cacheIndexAndFilterBlocksWithHighPriority = true; + pinL0FilterAndIndexBlocksInCache = false; + pinTopLevelIndexAndFilter = true; + indexType = IndexType.kBinarySearch; + dataBlockIndexType = DataBlockIndexType.kDataBlockBinarySearch; + dataBlockHashTableUtilRatio = 0.75; + checksumType = ChecksumType.kCRC32c; + noBlockCache = false; + blockCache = null; + persistentCache = null; + blockCacheCompressed = null; + blockSize = 4 * 1024; + blockSizeDeviation = 10; + blockRestartInterval = 16; + indexBlockRestartInterval = 1; + metadataBlockSize = 4096; + partitionFilters = false; + optimizeFiltersForMemory = false; + useDeltaEncoding = true; + filterPolicy = null; + wholeKeyFiltering = true; + verifyCompression = false; + readAmpBytesPerBit = 0; + formatVersion = 5; + enableIndexCompression = true; + blockAlign = false; + indexShortening = IndexShorteningMode.kShortenSeparators; + + // NOTE: ONLY used if blockCache == null + blockCacheSize = 8 * 1024 * 1024; + blockCacheNumShardBits = 0; + + // NOTE: ONLY used if blockCacheCompressed == null + blockCacheCompressedSize = 0; + blockCacheCompressedNumShardBits = 0; + } + + /** + * Indicating if we'd put index/filter blocks to the block cache. + * If not specified, each "table reader" object will pre-load index/filter + * block during table initialization. + * + * @return if index and filter blocks should be put in block cache. + */ + public boolean cacheIndexAndFilterBlocks() { + return cacheIndexAndFilterBlocks; + } + + /** + * Indicating if we'd put index/filter blocks to the block cache. + * If not specified, each "table reader" object will pre-load index/filter + * block during table initialization. + * + * @param cacheIndexAndFilterBlocks true if index and filter blocks should be put in the block cache. + * @return the reference to the current config.
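A common arrangement is to share one LRU block cache across column families and cache index/filter blocks in it; a hedged sketch (the 64 MB capacity is illustrative):

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setBlockCache(new LRUCache(64L * 1024 * 1024)) // one shared 64 MB cache
        .setCacheIndexAndFilterBlocks(true)
        .setCacheIndexAndFilterBlocksWithHighPriority(true)
        .setPinL0FilterAndIndexBlocksInCache(true);
    final Options options = new Options()
        .setTableFormatConfig(tableConfig);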
+ */ + public BlockBasedTableConfig setCacheIndexAndFilterBlocks( + final boolean cacheIndexAndFilterBlocks) { + this.cacheIndexAndFilterBlocks = cacheIndexAndFilterBlocks; + return this; + } + + /** + * Indicates if index and filter blocks will be treated as high-priority in the block cache. + * See note below about applicability. If not specified, defaults to true. + * + * @return if index and filter blocks will be treated as high-priority. + */ + public boolean cacheIndexAndFilterBlocksWithHighPriority() { + return cacheIndexAndFilterBlocksWithHighPriority; + } + + /** + * If true, cache index and filter blocks with high priority. If set to true, + * depending on implementation of block cache, index and filter blocks may be + * less likely to be evicted than data blocks. + * + * @param cacheIndexAndFilterBlocksWithHighPriority if index and filter blocks + * will be treated as high-priority. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setCacheIndexAndFilterBlocksWithHighPriority( + final boolean cacheIndexAndFilterBlocksWithHighPriority) { + this.cacheIndexAndFilterBlocksWithHighPriority = cacheIndexAndFilterBlocksWithHighPriority; + return this; + } + + /** + * Indicating if we'd like to pin L0 index/filter blocks to the block cache. + * If not specified, defaults to false. + * + * @return if L0 index and filter blocks should be pinned to the block cache. + */ + public boolean pinL0FilterAndIndexBlocksInCache() { + return pinL0FilterAndIndexBlocksInCache; + } + + /** + * Indicating if we'd like to pin L0 index/filter blocks to the block cache. + * If not specified, defaults to false. + * + * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache( + final boolean pinL0FilterAndIndexBlocksInCache) { + this.pinL0FilterAndIndexBlocksInCache = pinL0FilterAndIndexBlocksInCache; + return this; + } + + /** + * Indicates if top-level index and filter blocks should be pinned. + * + * @return if top-level index and filter blocks should be pinned. + */ + public boolean pinTopLevelIndexAndFilter() { + return pinTopLevelIndexAndFilter; + } + + /** + * If cacheIndexAndFilterBlocks is true and the below is true, then + * the top-level index of partitioned filter and index blocks are stored in + * the cache, but a reference is held in the "table reader" object so the + * blocks are pinned and only evicted from cache when the table reader is + * freed. This is not limited to L0 in the LSM tree. + * + * @param pinTopLevelIndexAndFilter if top-level index and filter blocks should be pinned. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPinTopLevelIndexAndFilter(final boolean pinTopLevelIndexAndFilter) { + this.pinTopLevelIndexAndFilter = pinTopLevelIndexAndFilter; + return this; + } + + /** + * Get the index type. + * + * @return the currently set index type + */ + public IndexType indexType() { + return indexType; + } + + /** + * Sets the index type to be used with this table. + * + * @param indexType {@link org.rocksdb.IndexType} value + * @return the reference to the current option. + */ + public BlockBasedTableConfig setIndexType( + final IndexType indexType) { + this.indexType = indexType; + return this; + } + + /** + * Get the data block index type.
+ * + * @return the currently set data block index type + */ + public DataBlockIndexType dataBlockIndexType() { + return dataBlockIndexType; + } + + /** + * Sets the data block index type to be used with this table. + * + * @param dataBlockIndexType {@link org.rocksdb.DataBlockIndexType} value + * @return the reference to the current option. + */ + public BlockBasedTableConfig setDataBlockIndexType( + final DataBlockIndexType dataBlockIndexType) { + this.dataBlockIndexType = dataBlockIndexType; + return this; + } + + /** + * Get the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is + * {@link DataBlockIndexType#kDataBlockBinaryAndHash}. + * + * @return the #entries/#buckets. + */ + public double dataBlockHashTableUtilRatio() { + return dataBlockHashTableUtilRatio; + } + + /** + * Set the #entries/#buckets. It is valid only when {@link #dataBlockIndexType()} is + * {@link DataBlockIndexType#kDataBlockBinaryAndHash}. + * + * @param dataBlockHashTableUtilRatio #entries/#buckets + * @return the reference to the current option. + */ + public BlockBasedTableConfig setDataBlockHashTableUtilRatio( + final double dataBlockHashTableUtilRatio) { + this.dataBlockHashTableUtilRatio = dataBlockHashTableUtilRatio; + return this; + } + + /** + * Get the checksum type to be used with this table. + * + * @return the currently set checksum type + */ + public ChecksumType checksumType() { + return checksumType; + } + + /** + * Sets the checksum type to be used with this table. + * + * @param checksumType {@link org.rocksdb.ChecksumType} value. + * @return the reference to the current option. + */ + public BlockBasedTableConfig setChecksumType( + final ChecksumType checksumType) { + this.checksumType = checksumType; + return this; + } + + /** + * Determine if the block cache is disabled. + * + * @return if block cache is disabled + */ + public boolean noBlockCache() { + return noBlockCache; + } + + /** + * Disable block cache. If this is set to true, + * then no block cache should be used, and the {@link #setBlockCache(Cache)} + * should point to a {@code null} object. + * + * Default: false + * + * @param noBlockCache true to disable the block cache + * @return the reference to the current config. + */ + public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) { + this.noBlockCache = noBlockCache; + return this; + } + + /** + * Use the specified cache for blocks. + * When not null this takes precedence even if the user sets a block cache size. + * + * {@link org.rocksdb.Cache} should not be disposed before options instances + * using this cache are disposed. + * + * {@link org.rocksdb.Cache} instance can be re-used in multiple options + * instances. + * + * @param blockCache {@link org.rocksdb.Cache} Cache java instance + * (e.g. LRUCache). + * + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockCache(final Cache blockCache) { + this.blockCache = blockCache; + return this; + } + + /** + * Use the specified persistent cache. + * + * If {@code !null} use the specified cache for pages read from device, + * otherwise no page cache is used. + * + * @param persistentCache the persistent cache + * + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPersistentCache( + final PersistentCache persistentCache) { + this.persistentCache = persistentCache; + return this; + } + + /** + * Use the specified cache for compressed blocks. + * + * If {@code null}, RocksDB will not use a compressed block cache.
+ * + * Note: though it looks similar to {@link #setBlockCache(Cache)}, RocksDB + * doesn't put the same type of object there. + * + * {@link org.rocksdb.Cache} should not be disposed before options instances + * using this cache are disposed. + * + * {@link org.rocksdb.Cache} instance can be re-used in multiple options + * instances. + * + * @param blockCacheCompressed {@link org.rocksdb.Cache} Cache java instance + * (e.g. LRUCache). + * + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockCacheCompressed( + final Cache blockCacheCompressed) { + this.blockCacheCompressed = blockCacheCompressed; + return this; + } + + /** + * Get the approximate size of user data packed per block. + * + * @return block size in bytes + */ + public long blockSize() { + return blockSize; + } + + /** + * Approximate size of user data packed per block. Note that the + * block size specified here corresponds to uncompressed data. The + * actual size of the unit read from disk may be smaller if + * compression is enabled. This parameter can be changed dynamically. + * Default: 4K + * + * @param blockSize block size in bytes + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockSize(final long blockSize) { + this.blockSize = blockSize; + return this; + } + + /** + * @return the block size deviation. + */ + public int blockSizeDeviation() { + return blockSizeDeviation; + } + + /** + * This is used to close a block before it reaches the configured + * {@link #blockSize()}. If the percentage of free space in the current block + * is less than this specified number and adding a new record to the block + * will exceed the configured block size, then this block will be closed and + * the new record will be written to the next block. + * + * Default is 10. + * + * @param blockSizeDeviation the deviation to block size allowed + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockSizeDeviation( + final int blockSizeDeviation) { + this.blockSizeDeviation = blockSizeDeviation; + return this; + } + + /** + * Get the block restart interval. + * + * @return block restart interval + */ + public int blockRestartInterval() { + return blockRestartInterval; + } + + /** + * Set the block restart interval. + * + * @param restartInterval block restart interval. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockRestartInterval( + final int restartInterval) { + blockRestartInterval = restartInterval; + return this; + } + + /** + * Get the index block restart interval. + * + * @return index block restart interval + */ + public int indexBlockRestartInterval() { + return indexBlockRestartInterval; + } + + /** + * Set the index block restart interval. + * + * @param restartInterval index block restart interval. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setIndexBlockRestartInterval( + final int restartInterval) { + indexBlockRestartInterval = restartInterval; + return this; + } + + /** + * Get the block size for partitioned metadata. + * + * @return block size for partitioned metadata. + */ + public long metadataBlockSize() { + return metadataBlockSize; + } + + /** + * Set block size for partitioned metadata. + * + * @param metadataBlockSize Partitioned metadata block size. + * @return the reference to the current config.
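The metadata block size interacts with the partitioned index/filter options documented just below; a hedged sketch of the usual combination (values illustrative):

    final BlockBasedTableConfig partitionedConfig = new BlockBasedTableConfig()
        .setIndexType(IndexType.kTwoLevelIndexSearch) // two-level index, needed for partitioned filters
        .setFilterPolicy(new BloomFilter(10))         // ~10 bits per key
        .setPartitionFilters(true)
        .setMetadataBlockSize(4096)                   // 4 KiB partition blocks
        .setPinTopLevelIndexAndFilter(true);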
+ */ + public BlockBasedTableConfig setMetadataBlockSize( + final long metadataBlockSize) { + this.metadataBlockSize = metadataBlockSize; + return this; + } + + /** + * Indicates if we're using partitioned filters. + * + * @return if we're using partition filters. + */ + public boolean partitionFilters() { + return partitionFilters; + } + + /** + * Use partitioned full filters for each SST file. This option is incompatible + * with block-based filters. + * + * Defaults to false. + * + * @param partitionFilters use partition filters. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPartitionFilters(final boolean partitionFilters) { + this.partitionFilters = partitionFilters; + return this; + } + + /*** + * Option to generate Bloom filters that minimize memory + * internal fragmentation. + * + * See {@link #setOptimizeFiltersForMemory(boolean)}. + * + * @return true if bloom filters are used to minimize memory internal + * fragmentation + */ + @Experimental("Option to generate Bloom filters that minimize memory internal fragmentation") + public boolean optimizeFiltersForMemory() { + return optimizeFiltersForMemory; + } + + /** + * Option to generate Bloom filters that minimize memory + * internal fragmentation. + * + * When false, malloc_usable_size is not available, or format_version < 5, + * filters are generated without regard to internal fragmentation when + * loaded into memory (historical behavior). When true (and + * malloc_usable_size is available and {@link #formatVersion()} >= 5), + * then Bloom filters are generated to "round up" and "round down" their + * sizes to minimize internal fragmentation when loaded into memory, assuming + * the reading DB has the same memory allocation characteristics as the + * generating DB. This option does not break forward or backward + * compatibility. + * + * While individual filters will vary in bits/key and false positive rate + * when setting is true, the implementation attempts to maintain a weighted + * average FP rate for filters consistent with this option set to false. + * + * With Jemalloc for example, this setting is expected to save about 10% of + * the memory footprint and block cache charge of filters, while increasing + * disk usage of filters by about 1-2% due to encoding efficiency losses + * with variance in bits/key. + * + * NOTE: Because some memory counted by block cache might be unmapped pages + * within internal fragmentation, this option can increase observed RSS + * memory usage. With {@link #cacheIndexAndFilterBlocks()} == true, + * this option makes the block cache better at using space it is allowed. + * + * NOTE: Do not set to true if you do not trust malloc_usable_size. With + * this option, RocksDB might access an allocated memory object beyond its + * original size if malloc_usable_size says it is safe to do so. While this + * can be considered bad practice, it should not produce undefined behavior + * unless malloc_usable_size is buggy or broken. + * + * @param optimizeFiltersForMemory true to enable Bloom filters that minimize + * memory internal fragmentation, or false to disable. + * + * @return the reference to the current config. 
+ */ + @Experimental("Option to generate Bloom filters that minimize memory internal fragmentation") + public BlockBasedTableConfig setOptimizeFiltersForMemory(final boolean optimizeFiltersForMemory) { + this.optimizeFiltersForMemory = optimizeFiltersForMemory; + return this; + } + + /** + * Determine if delta encoding is being used to compress block keys. + * + * @return true if delta encoding is enabled, false otherwise. + */ + public boolean useDeltaEncoding() { + return useDeltaEncoding; + } + + /** + * Use delta encoding to compress keys in blocks. + * + * NOTE: {@link ReadOptions#pinData()} requires this option to be disabled. + * + * Default: true + * + * @param useDeltaEncoding true to enable delta encoding + * + * @return the reference to the current config. + */ + public BlockBasedTableConfig setUseDeltaEncoding( + final boolean useDeltaEncoding) { + this.useDeltaEncoding = useDeltaEncoding; + return this; + } + + /** + * Get the filter policy. + * + * @return the current filter policy. + */ + public Filter filterPolicy() { + return filterPolicy; + } + + /** + * Use the specified filter policy to reduce disk reads. + * + * {@link org.rocksdb.Filter} should not be closed before options instances + * using this filter are closed. + * + * {@link org.rocksdb.Filter} instance can be re-used in multiple options + * instances. + * + * @param filterPolicy {@link org.rocksdb.Filter} Filter Policy java instance. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setFilterPolicy( + final Filter filterPolicy) { + this.filterPolicy = filterPolicy; + return this; + } + + /** + * Set the filter. + * + * @param filter the filter + * @return the reference to the current config. + * + * @deprecated Use {@link #setFilterPolicy(Filter)} + */ + @Deprecated + public BlockBasedTableConfig setFilter( + final Filter filter) { + return setFilterPolicy(filter); + } + + /** + * Determine if whole keys as opposed to prefixes are placed in the filter. + * + * @return if whole key filtering is enabled + */ + public boolean wholeKeyFiltering() { + return wholeKeyFiltering; + } + + /** + * If true, place whole keys in the filter (not just prefixes). + * This must generally be true for gets to be efficient. + * Default: true + * + * @param wholeKeyFiltering if enable whole key filtering + * @return the reference to the current config. + */ + public BlockBasedTableConfig setWholeKeyFiltering( + final boolean wholeKeyFiltering) { + this.wholeKeyFiltering = wholeKeyFiltering; + return this; + } + + /** + * Returns true when compression verification is enabled. + * + * See {@link #setVerifyCompression(boolean)}. + * + * @return true if compression verification is enabled. + */ + public boolean verifyCompression() { + return verifyCompression; + } + + /** + * Verify that decompressing the compressed block gives back the input. This + * is a verification mode that we use to detect bugs in compression + * algorithms. + * + * @param verifyCompression true to enable compression verification. + * + * @return the reference to the current config. + */ + public BlockBasedTableConfig setVerifyCompression( + final boolean verifyCompression) { + this.verifyCompression = verifyCompression; + return this; + } + + /** + * Get the Read amplification bytes per-bit. + * + * See {@link #setReadAmpBytesPerBit(int)}. + * + * @return the bytes per-bit. + */ + public int readAmpBytesPerBit() { + return readAmpBytesPerBit; + } + + /** + * Set the Read amplification bytes per-bit. 
+ * + * If used, For every data block we load into memory, we will create a bitmap + * of size ((block_size / `read_amp_bytes_per_bit`) / 8) bytes. This bitmap + * will be used to figure out the percentage we actually read of the blocks. + * + * When this feature is used Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES and + * Tickers::READ_AMP_TOTAL_READ_BYTES can be used to calculate the + * read amplification using this formula + * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) + * + * value => memory usage (percentage of loaded blocks memory) + * 1 => 12.50 % + * 2 => 06.25 % + * 4 => 03.12 % + * 8 => 01.56 % + * 16 => 00.78 % + * + * Note: This number must be a power of 2, if not it will be sanitized + * to be the next lowest power of 2, for example a value of 7 will be + * treated as 4, a value of 19 will be treated as 16. + * + * Default: 0 (disabled) + * + * @param readAmpBytesPerBit the bytes per-bit + * + * @return the reference to the current config. + */ + public BlockBasedTableConfig setReadAmpBytesPerBit(final int readAmpBytesPerBit) { + this.readAmpBytesPerBit = readAmpBytesPerBit; + return this; + } + + /** + * Get the format version. + * See {@link #setFormatVersion(int)}. + * + * @return the currently configured format version. + */ + public int formatVersion() { + return formatVersion; + } + + /** + *
<p>We currently have five versions:</p> + * + * <ul> + * <li>0 - This version is currently written + * out by all RocksDB's versions by default. Can be read by really old + * RocksDB's. Doesn't support changing checksum (default is CRC32).</li> + * <li>1 - Can be read by RocksDB's versions since 3.0. + * Supports non-default checksum, like xxHash. It is written by RocksDB when + * BlockBasedTableOptions::checksum is something other than kCRC32c. (version + * 0 is silently upconverted)</li> + * <li>2 - Can be read by RocksDB's versions since 3.10. + * Changes the way we encode compressed blocks with LZ4, BZip2 and Zlib + * compression. If you don't plan to run RocksDB before version 3.10, + * you should probably use this.</li> + * <li>3 - Can be read by RocksDB's versions since 5.15. Changes the way we + * encode the keys in index blocks. If you don't plan to run RocksDB before + * version 5.15, you should probably use this. + * This option only affects newly written tables. When reading existing + * tables, the information about version is read from the footer.</li> + * <li>4 - Can be read by RocksDB's versions since 5.16. Changes the way we + * encode the values in index blocks. If you don't plan to run RocksDB before + * version 5.16 and you are using index_block_restart_interval > 1, you should + * probably use this as it would reduce the index size. + * This option only affects newly written tables. When reading existing + * tables, the information about version is read from the footer.</li> + * <li>5 - Can be read by RocksDB's versions since 6.6.0. + * Full and partitioned filters use a generally faster and more accurate + * Bloom filter implementation, with a different schema.</li> + * </ul>
+ * + * @param formatVersion integer representing the version to be used. + * + * @return the reference to the current option. + */ + public BlockBasedTableConfig setFormatVersion( + final int formatVersion) { + assert (formatVersion >= 0); + this.formatVersion = formatVersion; + return this; + } + + /** + * Determine if index compression is enabled. + * + * See {@link #setEnableIndexCompression(boolean)}. + * + * @return true if index compression is enabled, false otherwise + */ + public boolean enableIndexCompression() { + return enableIndexCompression; + } + + /** + * Store index blocks on disk in compressed format. + * + * Changing this option to false will avoid the overhead of decompression + * if index blocks are evicted and read back. + * + * @param enableIndexCompression true to enable index compression, + * false to disable + * + * @return the reference to the current option. + */ + public BlockBasedTableConfig setEnableIndexCompression( + final boolean enableIndexCompression) { + this.enableIndexCompression = enableIndexCompression; + return this; + } + + /** + * Determines whether data blocks are aligned on the lesser of page size + * and block size. + * + * @return true if data blocks are aligned on the lesser of page size + * and block size. + */ + public boolean blockAlign() { + return blockAlign; + } + + /** + * Set whether data blocks should be aligned on the lesser of page size + * and block size. + * + * @param blockAlign true to align data blocks on the lesser of page size + * and block size. + * + * @return the reference to the current option. + */ + public BlockBasedTableConfig setBlockAlign(final boolean blockAlign) { + this.blockAlign = blockAlign; + return this; + } + + /** + * Get the index shortening mode. + * + * @return the index shortening mode. + */ + public IndexShorteningMode indexShortening() { + return indexShortening; + } + + /** + * Set the index shortening mode. + * + * See {@link IndexShorteningMode}. + * + * @param indexShortening the index shortening mode. + * + * @return the reference to the current option. + */ + public BlockBasedTableConfig setIndexShortening(final IndexShorteningMode indexShortening) { + this.indexShortening = indexShortening; + return this; + } + + /** + * Get the size of the cache in bytes that will be used by RocksDB. + * + * @return block cache size in bytes + */ + @Deprecated + public long blockCacheSize() { + return blockCacheSize; + } + + /** + * Set the size of the cache in bytes that will be used by RocksDB. + * If cacheSize is negative, then cache will not be used. + * DEFAULT: 8M + * + * @param blockCacheSize block cache size in bytes + * @return the reference to the current config. + * + * @deprecated Use {@link #setBlockCache(Cache)}. + */ + @Deprecated + public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) { + this.blockCacheSize = blockCacheSize; + return this; + } + + /** + * Returns the number of shard bits used in the block cache. + * The resulting number of shards would be 2 ^ (returned value). + * Any negative number means use default settings. + * + * @return the number of shard bits used in the block cache. + */ + @Deprecated + public int cacheNumShardBits() { + return blockCacheNumShardBits; + } + + /** + * Controls the number of shards for the block cache. + * This is applied only if cacheSize is set to non-negative. + * + * @param blockCacheNumShardBits the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. 
Any negative + * number means use default settings. + * @return the reference to the current option. + * + * @deprecated Use {@link #setBlockCache(Cache)}. + */ + @Deprecated + public BlockBasedTableConfig setCacheNumShardBits( + final int blockCacheNumShardBits) { + this.blockCacheNumShardBits = blockCacheNumShardBits; + return this; + } + + /** + * Size of compressed block cache. If 0, then block_cache_compressed is set + * to null. + * + * @return size of compressed block cache. + */ + @Deprecated + public long blockCacheCompressedSize() { + return blockCacheCompressedSize; + } + + /** + * Size of compressed block cache. If 0, then block_cache_compressed is set + * to null. + * + * @param blockCacheCompressedSize size of compressed block cache. + * @return the reference to the current config. + * + * @deprecated Use {@link #setBlockCacheCompressed(Cache)}. + */ + @Deprecated + public BlockBasedTableConfig setBlockCacheCompressedSize( + final long blockCacheCompressedSize) { + this.blockCacheCompressedSize = blockCacheCompressedSize; + return this; + } + + /** + * Returns the number of shard bits used in the compressed block cache. + * This is applied only if blockCompressedCacheSize is set to non-negative. + * + * @return the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. Any negative + * number means use default settings. + */ + @Deprecated + public int blockCacheCompressedNumShardBits() { + return blockCacheCompressedNumShardBits; + } + + /** + * Controls the number of shards for the block compressed cache. + * This is applied only if blockCompressedCacheSize is set to non-negative. + * + * @param blockCacheCompressedNumShardBits the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. Any negative + * number means use default settings. + * @return the reference to the current option. + * + * @deprecated Use {@link #setBlockCacheCompressed(Cache)}. + */ + @Deprecated + public BlockBasedTableConfig setBlockCacheCompressedNumShardBits( + final int blockCacheCompressedNumShardBits) { + this.blockCacheCompressedNumShardBits = blockCacheCompressedNumShardBits; + return this; + } + + /** + * Influence the behavior when kHashSearch is used. + * If false, stores a precise prefix-to-block-range mapping; + * if true, does not store the prefix and allows prefix hash collisions + * (less memory consumption). + * + * @return true if hash collisions are allowed. + * + * @deprecated This option is now deprecated. No matter what value it + * is set to, it will behave as + * if {@link #hashIndexAllowCollision()} == true. + */ + @Deprecated + public boolean hashIndexAllowCollision() { + return true; + } + + /** + * Influence the behavior when kHashSearch is used. + * If false, stores a precise prefix-to-block-range mapping; + * if true, does not store the prefix and allows prefix hash collisions + * (less memory consumption). + * + * @param hashIndexAllowCollision whether hash collisions should be allowed. + * + * @return the reference to the current config. + * + * @deprecated This option is now deprecated. No matter what value it + * is set to, it will behave as + * if {@link #hashIndexAllowCollision()} == true.
+ */ + @Deprecated + public BlockBasedTableConfig setHashIndexAllowCollision( + final boolean hashIndexAllowCollision) { + // no-op + return this; + } + + @Override protected long newTableFactoryHandle() { + final long filterPolicyHandle; + if (filterPolicy != null) { + filterPolicyHandle = filterPolicy.nativeHandle_; + } else { + filterPolicyHandle = 0; + } + + final long blockCacheHandle; + if (blockCache != null) { + blockCacheHandle = blockCache.nativeHandle_; + } else { + blockCacheHandle = 0; + } + + final long persistentCacheHandle; + if (persistentCache != null) { + persistentCacheHandle = persistentCache.nativeHandle_; + } else { + persistentCacheHandle = 0; + } + + final long blockCacheCompressedHandle; + if (blockCacheCompressed != null) { + blockCacheCompressedHandle = blockCacheCompressed.nativeHandle_; + } else { + blockCacheCompressedHandle = 0; + } + + return newTableFactoryHandle(cacheIndexAndFilterBlocks, + cacheIndexAndFilterBlocksWithHighPriority, pinL0FilterAndIndexBlocksInCache, + pinTopLevelIndexAndFilter, indexType.getValue(), dataBlockIndexType.getValue(), + dataBlockHashTableUtilRatio, checksumType.getValue(), noBlockCache, blockCacheHandle, + persistentCacheHandle, blockCacheCompressedHandle, blockSize, blockSizeDeviation, + blockRestartInterval, indexBlockRestartInterval, metadataBlockSize, partitionFilters, + optimizeFiltersForMemory, useDeltaEncoding, filterPolicyHandle, wholeKeyFiltering, + verifyCompression, readAmpBytesPerBit, formatVersion, enableIndexCompression, blockAlign, + indexShortening.getValue(), blockCacheSize, blockCacheNumShardBits, + blockCacheCompressedSize, blockCacheCompressedNumShardBits); + } + + private native long newTableFactoryHandle(final boolean cacheIndexAndFilterBlocks, + final boolean cacheIndexAndFilterBlocksWithHighPriority, + final boolean pinL0FilterAndIndexBlocksInCache, final boolean pinTopLevelIndexAndFilter, + final byte indexTypeValue, final byte dataBlockIndexTypeValue, + final double dataBlockHashTableUtilRatio, final byte checksumTypeValue, + final boolean noBlockCache, final long blockCacheHandle, final long persistentCacheHandle, + final long blockCacheCompressedHandle, final long blockSize, final int blockSizeDeviation, + final int blockRestartInterval, final int indexBlockRestartInterval, + final long metadataBlockSize, final boolean partitionFilters, + final boolean optimizeFiltersForMemory, final boolean useDeltaEncoding, + final long filterPolicyHandle, final boolean wholeKeyFiltering, + final boolean verifyCompression, final int readAmpBytesPerBit, final int formatVersion, + final boolean enableIndexCompression, final boolean blockAlign, final byte indexShortening, + + @Deprecated final long blockCacheSize, @Deprecated final int blockCacheNumShardBits, + + @Deprecated final long blockCacheCompressedSize, + @Deprecated final int blockCacheCompressedNumShardBits); + + //TODO(AR) flushBlockPolicyFactory + private boolean cacheIndexAndFilterBlocks; + private boolean cacheIndexAndFilterBlocksWithHighPriority; + private boolean pinL0FilterAndIndexBlocksInCache; + private boolean pinTopLevelIndexAndFilter; + private IndexType indexType; + private DataBlockIndexType dataBlockIndexType; + private double dataBlockHashTableUtilRatio; + private ChecksumType checksumType; + private boolean noBlockCache; + private Cache blockCache; + private PersistentCache persistentCache; + private Cache blockCacheCompressed; + private long blockSize; + private int blockSizeDeviation; + private int blockRestartInterval; + private int 
indexBlockRestartInterval; + private long metadataBlockSize; + private boolean partitionFilters; + private boolean optimizeFiltersForMemory; + private boolean useDeltaEncoding; + private Filter filterPolicy; + private boolean wholeKeyFiltering; + private boolean verifyCompression; + private int readAmpBytesPerBit; + private int formatVersion; + private boolean enableIndexCompression; + private boolean blockAlign; + private IndexShorteningMode indexShortening; + + // NOTE: ONLY used if blockCache == null + @Deprecated private long blockCacheSize; + @Deprecated private int blockCacheNumShardBits; + + // NOTE: ONLY used if blockCacheCompressed == null + @Deprecated private long blockCacheCompressedSize; + @Deprecated private int blockCacheCompressedNumShardBits; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java new file mode 100644 index 000000000..8aff715b7 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java @@ -0,0 +1,73 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Bloom filter policy that uses a bloom filter with approximately + * the specified number of bits per key. + * + *
<p> + * Note: if you are using a custom comparator that ignores some parts + * of the keys being compared, you must not use this {@code BloomFilter} + * and must provide your own FilterPolicy that also ignores the + * corresponding parts of the keys. For example, if the comparator + * ignores trailing spaces, it would be incorrect to use a + * FilterPolicy (like {@code BloomFilter}) that does not ignore + * trailing spaces in keys.</p>
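+ * + * <p>A minimal usage sketch (attaching the filter to a + * {@code BlockBasedTableConfig}; the enclosing lifecycle is illustrative): + * <pre>{@code + *   try (final Filter bloomFilter = new BloomFilter(10)) { + *     BlockBasedTableConfig tableConfig = new BlockBasedTableConfig() + *         .setFilterPolicy(bloomFilter); + *     // pass tableConfig to Options#setTableFormatConfig before opening the DB + *   } + * }</pre>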
+ */ +public class BloomFilter extends Filter { + + private static final double DEFAULT_BITS_PER_KEY = 10.0; + + /** + * BloomFilter constructor + * + *
<p> + * Callers must delete the result after any database that is using the + * result has been closed.</p>
+ */ + public BloomFilter() { + this(DEFAULT_BITS_PER_KEY); + } + + /** + * BloomFilter constructor + * + *
<p> + * bits_per_key: bits per key in bloom filter. A good value for bits_per_key + * is 9.9, which yields a filter with ~ 1% false positive rate. + * </p> + * <p> + * Callers must delete the result after any database that is using the + * result has been closed.</p>
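+ * + * <p>Sketch (the value is illustrative): + * <pre>{@code + *   // roughly 1% false positive rate at about 10 bits per key + *   Filter filter = new BloomFilter(9.9); + * }</pre>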
+ * + * @param bitsPerKey number of bits to use + */ + public BloomFilter(final double bitsPerKey) { + super(createNewBloomFilter(bitsPerKey)); + } + + /** + * BloomFilter constructor + * + *
<p> + * bits_per_key: bits per key in bloom filter. A good value for bits_per_key + * is 10, which yields a filter with ~ 1% false positive rate. + * </p> + * <p>default bits_per_key: 10</p> + * + * <p> + * Callers must delete the result after any database that is using the + * result has been closed.</p>
+ * + * @param bitsPerKey number of bits to use + * @param IGNORED_useBlockBasedMode obsolete, ignored parameter + */ + public BloomFilter(final double bitsPerKey, final boolean IGNORED_useBlockBasedMode) { + this(bitsPerKey); + } + + private native static long createNewBloomFilter(final double bitsPerKey); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java new file mode 100644 index 000000000..2c89bf218 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Builtin RocksDB comparators + * + *
<ol> + *   <li>BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise + *   order.</li> + *   <li>REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise + *   order.</li> + * </ol>
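+ * + * <p>A selection sketch (the options object is illustrative): + * <pre>{@code + *   Options options = new Options() + *       .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR); + * }</pre>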
+ */ +public enum BuiltinComparator { + BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java b/src/rocksdb/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java new file mode 100644 index 000000000..8eef95447 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ByteBufferGetStatus.java @@ -0,0 +1,50 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; +import java.util.List; + +/** + * A ByteBuffer containing fetched data, together with a result for the fetch + * and the total size of the object fetched. + * + * Used for the individual results of + * {@link RocksDB#multiGetByteBuffers(List, List)} + * {@link RocksDB#multiGetByteBuffers(List, List, List)} + * {@link RocksDB#multiGetByteBuffers(ReadOptions, List, List)} + * {@link RocksDB#multiGetByteBuffers(ReadOptions, List, List, List)} + */ +public class ByteBufferGetStatus { + public final Status status; + public final int requiredSize; + public final ByteBuffer value; + + /** + * Constructor used for success status, when the value is contained in the buffer + * + * @param status the status of the request to fetch into the buffer + * @param requiredSize the size of the data, which may be bigger than the buffer + * @param value the buffer containing as much of the value as fits + */ + ByteBufferGetStatus(final Status status, final int requiredSize, final ByteBuffer value) { + this.status = status; + this.requiredSize = requiredSize; + this.value = value; + } + + /** + * Constructor used for a failure status, when no value is filled in + * + * @param status the status of the request to fetch into the buffer + */ + ByteBufferGetStatus(final Status status) { + this.status = status; + this.requiredSize = 0; + this.value = null; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Cache.java b/src/rocksdb/java/src/main/java/org/rocksdb/Cache.java new file mode 100644 index 000000000..569a1df06 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Cache.java @@ -0,0 +1,40 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + + +public abstract class Cache extends RocksObject { + protected Cache(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Returns the memory size for the entries + * residing in cache. + * + * @return cache usage size. + * + */ + public long getUsage() { + assert (isOwningHandle()); + return getUsage(this.nativeHandle_); + } + + /** + * Returns the memory size for the entries + * being pinned in cache. + * + * @return cache pinned usage size. 
+ * + */ + public long getPinnedUsage() { + assert (isOwningHandle()); + return getPinnedUsage(this.nativeHandle_); + } + + private native static long getUsage(final long handle); + private native static long getPinnedUsage(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java new file mode 100644 index 000000000..6c87cc188 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraCompactionFilter.java @@ -0,0 +1,19 @@ +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Just a Java wrapper around CassandraCompactionFilter implemented in C++ + */ +public class CassandraCompactionFilter + extends AbstractCompactionFilter { + public CassandraCompactionFilter(boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds) { + super(createNewCassandraCompactionFilter0(purgeTtlOnExpiration, gcGracePeriodInSeconds)); + } + + private native static long createNewCassandraCompactionFilter0( + boolean purgeTtlOnExpiration, int gcGracePeriodInSeconds); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java new file mode 100644 index 000000000..4b0c71ba5 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CassandraValueMergeOperator.java @@ -0,0 +1,25 @@ +// Copyright (c) 2017-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * CassandraValueMergeOperator is a merge operator that merges two cassandra wide column + * values. + */ +public class CassandraValueMergeOperator extends MergeOperator { + public CassandraValueMergeOperator(int gcGracePeriodInSeconds) { + super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, 0)); + } + + public CassandraValueMergeOperator(int gcGracePeriodInSeconds, int operandsLimit) { + super(newSharedCassandraValueMergeOperator(gcGracePeriodInSeconds, operandsLimit)); + } + + private native static long newSharedCassandraValueMergeOperator( + int gcGracePeriodInSeconds, int limit); + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java b/src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java new file mode 100644 index 000000000..000969932 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java @@ -0,0 +1,66 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Provides Checkpoint functionality. Checkpoints + * provide persistent snapshots of RocksDB databases. + */ +public class Checkpoint extends RocksObject { + + /** + * Creates a Checkpoint object to be used for creating open-able + * snapshots. + * + * @param db {@link RocksDB} instance. + * @return a Checkpoint instance. 
+ * + * @throws java.lang.IllegalArgumentException if {@link RocksDB} + * instance is null. + * @throws java.lang.IllegalStateException if {@link RocksDB} + * instance is not initialized. + */ + public static Checkpoint create(final RocksDB db) { + if (db == null) { + throw new IllegalArgumentException( + "RocksDB instance shall not be null."); + } else if (!db.isOwningHandle()) { + throw new IllegalStateException( + "RocksDB instance must be initialized."); + } + Checkpoint checkpoint = new Checkpoint(db); + return checkpoint; + } + + /** + *
<p>Builds an open-able snapshot of RocksDB on the same disk, which + * accepts an output directory on the same disk, and under the directory + * (1) hard-linked SST files pointing to existing live SST files + * (2) copied manifest and other files</p>
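+ * + * <p>A minimal sketch (assuming an open database {@code db}; the target + * directory is illustrative and must not already exist): + * <pre>{@code + *   try (final Checkpoint checkpoint = Checkpoint.create(db)) { + *     checkpoint.createCheckpoint("/path/to/checkpoint-dir"); + *   } + * }</pre>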
+ * + * @param checkpointPath path to the folder where the snapshot is going + * to be stored. + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void createCheckpoint(final String checkpointPath) + throws RocksDBException { + createCheckpoint(nativeHandle_, checkpointPath); + } + + private Checkpoint(final RocksDB db) { + super(newCheckpoint(db.nativeHandle_)); + this.db_ = db; + } + + private final RocksDB db_; + + private static native long newCheckpoint(long dbHandle); + @Override protected final native void disposeInternal(final long handle); + + private native void createCheckpoint(long handle, String checkpointPath) + throws RocksDBException; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java new file mode 100644 index 000000000..e03fa14ba --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java @@ -0,0 +1,45 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Checksum types used in conjunction with BlockBasedTable. + */ +public enum ChecksumType { + /** + * Not implemented yet. + */ + kNoChecksum((byte) 0), + /** + * CRC32 Checksum + */ + kCRC32c((byte) 1), + /** + * XX Hash + */ + kxxHash((byte) 2), + /** + * XX Hash 64 + */ + kxxHash64((byte) 3), + + kXXH3((byte) 4); + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + private ChecksumType(final byte value) { + value_ = value; + } + + private final byte value_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java b/src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java new file mode 100644 index 000000000..a66dc0e8a --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ClockCache.java @@ -0,0 +1,59 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Similar to {@link LRUCache}, but based on the CLOCK algorithm with + * better concurrent performance in some cases + */ +public class ClockCache extends Cache { + + /** + * Create a new cache with a fixed size capacity. + * + * @param capacity The fixed size capacity of the cache + */ + public ClockCache(final long capacity) { + super(newClockCache(capacity, -1, false)); + } + + /** + * Create a new cache with a fixed size capacity. The cache is sharded + * to 2^numShardBits shards, by hash of the key. The total capacity + * is divided and evenly assigned to each shard. + * numShardBits = -1 means it is automatically determined: every shard + * will be at least 512KB and number of shard bits will not exceed 6. + * + * @param capacity The fixed size capacity of the cache + * @param numShardBits The cache is sharded to 2^numShardBits shards, + * by hash of the key + */ + public ClockCache(final long capacity, final int numShardBits) { + super(newClockCache(capacity, numShardBits, false)); + } + + /** + * Create a new cache with a fixed size capacity. 
The cache is sharded + * to 2^numShardBits shards, by hash of the key. The total capacity + * is divided and evenly assigned to each shard. If strictCapacityLimit + * is set, insert to the cache will fail when cache is full. + * numShardBits = -1 means it is automatically determined: every shard + * will be at least 512KB and number of shard bits will not exceed 6. + * + * @param capacity The fixed size capacity of the cache + * @param numShardBits The cache is sharded to 2^numShardBits shards, + * by hash of the key + * @param strictCapacityLimit insert to the cache will fail when cache is full + */ + public ClockCache(final long capacity, final int numShardBits, + final boolean strictCapacityLimit) { + super(newClockCache(capacity, numShardBits, strictCapacityLimit)); + } + + private native static long newClockCache(final long capacity, + final int numShardBits, final boolean strictCapacityLimit); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java new file mode 100644 index 000000000..125a8dcf8 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java @@ -0,0 +1,84 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Arrays; + +/** + *
<p>Describes a column family with a + * name and respective Options.</p>
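+ * + * <p>A construction sketch (name and options are illustrative; assumes + * {@code java.nio.charset.StandardCharsets} is imported): + * <pre>{@code + *   ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor( + *       "user-data".getBytes(StandardCharsets.UTF_8), + *       new ColumnFamilyOptions()); + * }</pre>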
+ */ +public class ColumnFamilyDescriptor { + + /** + *
<p>Creates a new Column Family using a name and default + * options.</p>
+ * + * @param columnFamilyName name of column family. + * @since 3.10.0 + */ + public ColumnFamilyDescriptor(final byte[] columnFamilyName) { + this(columnFamilyName, new ColumnFamilyOptions()); + } + + /** + *
<p>Creates a new Column Family using a name and custom + * options.</p>
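+ * + * <p>Descriptors built this way are typically passed to {@code RocksDB#open} + * (a sketch; the path, names and option objects are illustrative, and the + * usual {@code java.util} imports are assumed): + * <pre>{@code + *   List<ColumnFamilyDescriptor> descriptors = Arrays.asList( + *       new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + *       new ColumnFamilyDescriptor( + *           "user-data".getBytes(StandardCharsets.UTF_8), + *           new ColumnFamilyOptions())); + *   List<ColumnFamilyHandle> handles = new ArrayList<>(); + *   try (final DBOptions dbOptions = new DBOptions() + *            .setCreateIfMissing(true) + *            .setCreateMissingColumnFamilies(true); + *        final RocksDB db = + *            RocksDB.open(dbOptions, "/path/to/db", descriptors, handles)) { + *     // use db and handles + *   } + * }</pre>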
+ * + * @param columnFamilyName name of column family. + * @param columnFamilyOptions options to be used with + * column family. + * @since 3.10.0 + */ + public ColumnFamilyDescriptor(final byte[] columnFamilyName, + final ColumnFamilyOptions columnFamilyOptions) { + columnFamilyName_ = columnFamilyName; + columnFamilyOptions_ = columnFamilyOptions; + } + + /** + * Retrieve name of column family. + * + * @return column family name. + * @since 3.10.0 + */ + public byte[] getName() { + return columnFamilyName_; + } + + /** + * Retrieve assigned options instance. + * + * @return Options instance assigned to this instance. + */ + public ColumnFamilyOptions getOptions() { + return columnFamilyOptions_; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final ColumnFamilyDescriptor that = (ColumnFamilyDescriptor) o; + return Arrays.equals(columnFamilyName_, that.columnFamilyName_) + && columnFamilyOptions_.nativeHandle_ == that.columnFamilyOptions_.nativeHandle_; + } + + @Override + public int hashCode() { + int result = (int) (columnFamilyOptions_.nativeHandle_ ^ (columnFamilyOptions_.nativeHandle_ >>> 32)); + result = 31 * result + Arrays.hashCode(columnFamilyName_); + return result; + } + + private final byte[] columnFamilyName_; + private final ColumnFamilyOptions columnFamilyOptions_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java new file mode 100644 index 000000000..1ac0a35bb --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -0,0 +1,151 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Arrays; +import java.util.Objects; + +/** + * ColumnFamilyHandle class to hold handles to underlying rocksdb + * ColumnFamily Pointers. + */ +public class ColumnFamilyHandle extends RocksObject { + /** + * Constructs column family Java object, which operates on underlying native object. + * + * @param rocksDB db instance associated with this column family + * @param nativeHandle native handle to underlying native ColumnFamily object + */ + ColumnFamilyHandle(final RocksDB rocksDB, + final long nativeHandle) { + super(nativeHandle); + // rocksDB must point to a valid RocksDB instance; + assert(rocksDB != null); + // ColumnFamilyHandle must hold a reference to the related RocksDB instance + // to guarantee that while a GC cycle starts ColumnFamilyHandle instances + // are freed prior to RocksDB instances. + this.rocksDB_ = rocksDB; + } + + /** + * Constructor called only from JNI. + * + * NOTE: we are producing an additional Java Object here to represent the underlying native C++ + * ColumnFamilyHandle object. The underlying object is not owned by ourselves. The Java API user + * likely already had a ColumnFamilyHandle Java object which owns the underlying C++ object, as + * they will have been presented it when they opened the database or added a Column Family. + * + * + * TODO(AR) - Potentially a better design would be to cache the active Java Column Family Objects + * in RocksDB, and return the same Java Object instead of instantiating a new one here. 
This could + * also help us to improve the Java API semantics for Java users. See for example + * https://github.com/facebook/rocksdb/issues/2687. + * + * @param nativeHandle native handle to the column family. + */ + ColumnFamilyHandle(final long nativeHandle) { + super(nativeHandle); + rocksDB_ = null; + disOwnNativeHandle(); + } + + /** + * Gets the name of the Column Family. + * + * @return The name of the Column Family. + * + * @throws RocksDBException if an error occurs whilst retrieving the name. + */ + public byte[] getName() throws RocksDBException { + assert(isOwningHandle() || isDefaultColumnFamily()); + return getName(nativeHandle_); + } + + /** + * Gets the ID of the Column Family. + * + * @return the ID of the Column Family. + */ + public int getID() { + assert(isOwningHandle() || isDefaultColumnFamily()); + return getID(nativeHandle_); + } + + /** + * Gets the up-to-date descriptor of the column family + * associated with this handle. Since it fills "*desc" with the up-to-date + * information, this call might internally lock and release DB mutex to + * access the up-to-date CF options. In addition, all the pointer-typed + * options cannot be referenced any longer than the original options exist. + * + * Note that this function is not supported in RocksDBLite. + * + * @return the up-to-date descriptor. + * + * @throws RocksDBException if an error occurs whilst retrieving the + * descriptor. + */ + public ColumnFamilyDescriptor getDescriptor() throws RocksDBException { + assert(isOwningHandle() || isDefaultColumnFamily()); + return getDescriptor(nativeHandle_); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final ColumnFamilyHandle that = (ColumnFamilyHandle) o; + try { + return rocksDB_.nativeHandle_ == that.rocksDB_.nativeHandle_ && + getID() == that.getID() && + Arrays.equals(getName(), that.getName()); + } catch (RocksDBException e) { + throw new RuntimeException("Cannot compare column family handles", e); + } + } + + @Override + public int hashCode() { + try { + int result = Objects.hash(getID(), rocksDB_.nativeHandle_); + result = 31 * result + Arrays.hashCode(getName()); + return result; + } catch (RocksDBException e) { + throw new RuntimeException("Cannot calculate hash code of column family handle", e); + } + } + + protected boolean isDefaultColumnFamily() { + return nativeHandle_ == rocksDB_.getDefaultColumnFamily().nativeHandle_; + } + + /** + *
<p>Deletes the underlying C++ column family handle pointer.</p> + * + * <p>Note: the underlying handle can only be safely deleted if the RocksDB + * instance related to a certain ColumnFamilyHandle is still valid and + * initialized. Therefore {@code disposeInternal()} checks if the RocksDB is + * initialized before freeing the native handle.</p>
+ */ + @Override + protected void disposeInternal() { + if(rocksDB_.isOwningHandle()) { + disposeInternal(nativeHandle_); + } + } + + private native byte[] getName(final long handle) throws RocksDBException; + private native int getID(final long handle); + private native ColumnFamilyDescriptor getDescriptor(final long handle) throws RocksDBException; + @Override protected final native void disposeInternal(final long handle); + + private final RocksDB rocksDB_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java new file mode 100644 index 000000000..191904017 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyMetaData.java @@ -0,0 +1,70 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Arrays; +import java.util.List; + +/** + * The metadata that describes a column family. + */ +public class ColumnFamilyMetaData { + private final long size; + private final long fileCount; + private final byte[] name; + private final LevelMetaData[] levels; + + /** + * Called from JNI C++ + */ + private ColumnFamilyMetaData( + final long size, + final long fileCount, + final byte[] name, + final LevelMetaData[] levels) { + this.size = size; + this.fileCount = fileCount; + this.name = name; + this.levels = levels; + } + + /** + * The size of this column family in bytes, which is equal to the sum of + * the file size of its {@link #levels()}. + * + * @return the size of this column family + */ + public long size() { + return size; + } + + /** + * The number of files in this column family. + * + * @return the number of files + */ + public long fileCount() { + return fileCount; + } + + /** + * The name of the column family. + * + * @return the name + */ + public byte[] name() { + return name; + } + + /** + * The metadata of all levels in this column family. + * + * @return the levels metadata + */ + public List levels() { + return Arrays.asList(levels); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java new file mode 100644 index 000000000..a642cb6fa --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -0,0 +1,1540 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.file.Paths; +import java.util.*; + +/** + * ColumnFamilyOptions to control the behavior of a database. It will be used + * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). + * + * As a descendent of {@link AbstractNativeReference}, this class is {@link AutoCloseable} + * and will be automatically released if opened in the preamble of a try with resources block. + */ +public class ColumnFamilyOptions extends RocksObject + implements ColumnFamilyOptionsInterface, + MutableColumnFamilyOptionsInterface { + static { + RocksDB.loadLibrary(); + } + + /** + * Construct ColumnFamilyOptions. 
+ * + * This constructor will create (by allocating a block of memory) + * an {@code rocksdb::ColumnFamilyOptions} in the c++ side. + */ + public ColumnFamilyOptions() { + super(newColumnFamilyOptions()); + } + + /** + * Copy constructor for ColumnFamilyOptions. + * + * NOTE: This does a shallow copy, which means comparator, merge_operator, compaction_filter, + * compaction_filter_factory and other pointers will be cloned! + * + * @param other The ColumnFamilyOptions to copy. + */ + public ColumnFamilyOptions(ColumnFamilyOptions other) { + super(copyColumnFamilyOptions(other.nativeHandle_)); + this.memTableConfig_ = other.memTableConfig_; + this.tableFormatConfig_ = other.tableFormatConfig_; + this.comparator_ = other.comparator_; + this.compactionFilter_ = other.compactionFilter_; + this.compactionFilterFactory_ = other.compactionFilterFactory_; + this.compactionOptionsUniversal_ = other.compactionOptionsUniversal_; + this.compactionOptionsFIFO_ = other.compactionOptionsFIFO_; + this.bottommostCompressionOptions_ = other.bottommostCompressionOptions_; + this.compressionOptions_ = other.compressionOptions_; + this.compactionThreadLimiter_ = other.compactionThreadLimiter_; + this.sstPartitionerFactory_ = other.sstPartitionerFactory_; + } + + /** + * Constructor from Options + * + * @param options The options. + */ + public ColumnFamilyOptions(final Options options) { + super(newColumnFamilyOptionsFromOptions(options.nativeHandle_)); + } + + /** + *
<p>Constructor to be used by + * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}, + * {@link ColumnFamilyDescriptor#getOptions()} + * and also called via JNI.</p>
+ * + * @param handle native handle to ColumnFamilyOptions instance. + */ + ColumnFamilyOptions(final long handle) { + super(handle); + } + + /** + *
<p>Method to get an options instance by using pre-configured + * property values. If one or many values are undefined in + * the context of RocksDB the method will return a null + * value.</p> + * + * <p>Note: Property keys can be derived from + * getter methods within the options class. Example: the method + * {@code writeBufferSize()} has a property key: + * {@code write_buffer_size}.</p>
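+ * + * <p>A minimal sketch (the property key and value are illustrative): + * <pre>{@code + *   Properties props = new Properties(); + *   props.setProperty("write_buffer_size", "67108864"); + *   ColumnFamilyOptions cfOpts = + *       ColumnFamilyOptions.getColumnFamilyOptionsFromProps(props); + *   if (cfOpts == null) { + *     // one or more keys were not recognised by RocksDB + *   } + * }</pre>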
+ * + * @param properties {@link java.util.Properties} instance. + * + * @return {@link org.rocksdb.ColumnFamilyOptions instance} + * or null. + * + * @throws java.lang.IllegalArgumentException if null or empty + * {@link Properties} instance is passed to the method call. + */ + public static ColumnFamilyOptions getColumnFamilyOptionsFromProps( + final Properties properties) { + ColumnFamilyOptions columnFamilyOptions = null; + final long handle = + getColumnFamilyOptionsFromProps(Options.getOptionStringFromProps(properties)); + if (handle != 0) { + columnFamilyOptions = new ColumnFamilyOptions(handle); + } + return columnFamilyOptions; + } + + /** + *
<p>Method to get an options instance by using pre-configured + * property values. If one or many values are undefined in + * the context of RocksDB the method will return a null + * value.</p> + * + * <p>Note: Property keys can be derived from + * getter methods within the options class. Example: the method + * {@code writeBufferSize()} has a property key: + * {@code write_buffer_size}.</p>
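+ * + * <p>As above, but with an explicit {@code ConfigOptions} (a sketch; the + * properties object is assumed to be populated as in the overload without + * {@code ConfigOptions}): + * <pre>{@code + *   try (final ConfigOptions cfgOpts = new ConfigOptions()) { + *     ColumnFamilyOptions cfOpts = + *         ColumnFamilyOptions.getColumnFamilyOptionsFromProps(cfgOpts, props); + *   } + * }</pre>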
+ * + * @param cfgOpts ConfigOptions controlling how the properties are parsed. + * @param properties {@link java.util.Properties} instance. + * + * @return {@link org.rocksdb.ColumnFamilyOptions instance} + * or null. + * + * @throws java.lang.IllegalArgumentException if null or empty + * {@link Properties} instance is passed to the method call. + */ + public static ColumnFamilyOptions getColumnFamilyOptionsFromProps( + final ConfigOptions cfgOpts, final Properties properties) { + ColumnFamilyOptions columnFamilyOptions = null; + final long handle = getColumnFamilyOptionsFromProps( + cfgOpts.nativeHandle_, Options.getOptionStringFromProps(properties)); + if (handle != 0){ + columnFamilyOptions = new ColumnFamilyOptions(handle); + } + return columnFamilyOptions; + } + + @Override + public ColumnFamilyOptions oldDefaults(final int majorVersion, final int minorVersion) { + oldDefaults(nativeHandle_, majorVersion, minorVersion); + return this; + } + + @Override + public ColumnFamilyOptions optimizeForSmallDb() { + optimizeForSmallDb(nativeHandle_); + return this; + } + + @Override + public ColumnFamilyOptions optimizeForSmallDb(final Cache cache) { + optimizeForSmallDb(nativeHandle_, cache.getNativeHandle()); + return this; + } + + @Override + public ColumnFamilyOptions optimizeForPointLookup( + final long blockCacheSizeMb) { + optimizeForPointLookup(nativeHandle_, + blockCacheSizeMb); + return this; + } + + @Override + public ColumnFamilyOptions optimizeLevelStyleCompaction() { + optimizeLevelStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public ColumnFamilyOptions optimizeLevelStyleCompaction( + final long memtableMemoryBudget) { + optimizeLevelStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public ColumnFamilyOptions optimizeUniversalStyleCompaction() { + optimizeUniversalStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public ColumnFamilyOptions optimizeUniversalStyleCompaction( + final long memtableMemoryBudget) { + optimizeUniversalStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public ColumnFamilyOptions setComparator( + final BuiltinComparator builtinComparator) { + assert(isOwningHandle()); + setComparatorHandle(nativeHandle_, builtinComparator.ordinal()); + return this; + } + + @Override + public ColumnFamilyOptions setComparator( + final AbstractComparator comparator) { + assert (isOwningHandle()); + setComparatorHandle(nativeHandle_, comparator.nativeHandle_, + comparator.getComparatorType().getValue()); + comparator_ = comparator; + return this; + } + + @Override + public ColumnFamilyOptions setMergeOperatorName(final String name) { + assert (isOwningHandle()); + if (name == null) { + throw new IllegalArgumentException( + "Merge operator name must not be null."); + } + setMergeOperatorName(nativeHandle_, name); + return this; + } + + @Override + public ColumnFamilyOptions setMergeOperator( + final MergeOperator mergeOperator) { + setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_); + return this; + } + + @Override + public ColumnFamilyOptions setCompactionFilter( + final AbstractCompactionFilter> + compactionFilter) { + setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_); + compactionFilter_ = compactionFilter; + return this; + } + + @Override + public AbstractCompactionFilter> compactionFilter() { + assert (isOwningHandle()); + return 
compactionFilter_; + } + + @Override + public ColumnFamilyOptions setCompactionFilterFactory(final AbstractCompactionFilterFactory> compactionFilterFactory) { + assert (isOwningHandle()); + setCompactionFilterFactoryHandle(nativeHandle_, compactionFilterFactory.nativeHandle_); + compactionFilterFactory_ = compactionFilterFactory; + return this; + } + + @Override + public AbstractCompactionFilterFactory> compactionFilterFactory() { + assert (isOwningHandle()); + return compactionFilterFactory_; + } + + @Override + public ColumnFamilyOptions setWriteBufferSize(final long writeBufferSize) { + assert(isOwningHandle()); + setWriteBufferSize(nativeHandle_, writeBufferSize); + return this; + } + + @Override + public long writeBufferSize() { + assert(isOwningHandle()); + return writeBufferSize(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxWriteBufferNumber( + final int maxWriteBufferNumber) { + assert(isOwningHandle()); + setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber); + return this; + } + + @Override + public int maxWriteBufferNumber() { + assert(isOwningHandle()); + return maxWriteBufferNumber(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMinWriteBufferNumberToMerge( + final int minWriteBufferNumberToMerge) { + setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge); + return this; + } + + @Override + public int minWriteBufferNumberToMerge() { + return minWriteBufferNumberToMerge(nativeHandle_); + } + + @Override + public ColumnFamilyOptions useFixedLengthPrefixExtractor(final int n) { + assert(isOwningHandle()); + useFixedLengthPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public ColumnFamilyOptions useCappedPrefixExtractor(final int n) { + assert(isOwningHandle()); + useCappedPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public ColumnFamilyOptions setCompressionType( + final CompressionType compressionType) { + setCompressionType(nativeHandle_, compressionType.getValue()); + return this; + } + + @Override + public CompressionType compressionType() { + return CompressionType.getCompressionType(compressionType(nativeHandle_)); + } + + @Override + public ColumnFamilyOptions setCompressionPerLevel( + final List compressionLevels) { + final byte[] byteCompressionTypes = new byte[ + compressionLevels.size()]; + for (int i = 0; i < compressionLevels.size(); i++) { + byteCompressionTypes[i] = compressionLevels.get(i).getValue(); + } + setCompressionPerLevel(nativeHandle_, byteCompressionTypes); + return this; + } + + @Override + public List compressionPerLevel() { + final byte[] byteCompressionTypes = + compressionPerLevel(nativeHandle_); + final List compressionLevels = new ArrayList<>(); + for (final byte byteCompressionType : byteCompressionTypes) { + compressionLevels.add(CompressionType.getCompressionType( + byteCompressionType)); + } + return compressionLevels; + } + + @Override + public ColumnFamilyOptions setBottommostCompressionType( + final CompressionType bottommostCompressionType) { + setBottommostCompressionType(nativeHandle_, + bottommostCompressionType.getValue()); + return this; + } + + @Override + public CompressionType bottommostCompressionType() { + return CompressionType.getCompressionType( + bottommostCompressionType(nativeHandle_)); + } + + @Override + public ColumnFamilyOptions setBottommostCompressionOptions( + final CompressionOptions bottommostCompressionOptions) { + setBottommostCompressionOptions(nativeHandle_, + 
bottommostCompressionOptions.nativeHandle_); + this.bottommostCompressionOptions_ = bottommostCompressionOptions; + return this; + } + + @Override + public CompressionOptions bottommostCompressionOptions() { + return this.bottommostCompressionOptions_; + } + + @Override + public ColumnFamilyOptions setCompressionOptions( + final CompressionOptions compressionOptions) { + setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_); + this.compressionOptions_ = compressionOptions; + return this; + } + + @Override + public CompressionOptions compressionOptions() { + return this.compressionOptions_; + } + + @Override + public ColumnFamilyOptions setNumLevels(final int numLevels) { + setNumLevels(nativeHandle_, numLevels); + return this; + } + + @Override + public int numLevels() { + return numLevels(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelZeroFileNumCompactionTrigger( + final int numFiles) { + setLevelZeroFileNumCompactionTrigger( + nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroFileNumCompactionTrigger() { + return levelZeroFileNumCompactionTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelZeroSlowdownWritesTrigger( + final int numFiles) { + setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroSlowdownWritesTrigger() { + return levelZeroSlowdownWritesTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelZeroStopWritesTrigger(final int numFiles) { + setLevelZeroStopWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroStopWritesTrigger() { + return levelZeroStopWritesTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setTargetFileSizeBase( + final long targetFileSizeBase) { + setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); + return this; + } + + @Override + public long targetFileSizeBase() { + return targetFileSizeBase(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setTargetFileSizeMultiplier( + final int multiplier) { + setTargetFileSizeMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public int targetFileSizeMultiplier() { + return targetFileSizeMultiplier(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxBytesForLevelBase( + final long maxBytesForLevelBase) { + setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase); + return this; + } + + @Override + public long maxBytesForLevelBase() { + return maxBytesForLevelBase(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelCompactionDynamicLevelBytes( + final boolean enableLevelCompactionDynamicLevelBytes) { + setLevelCompactionDynamicLevelBytes(nativeHandle_, + enableLevelCompactionDynamicLevelBytes); + return this; + } + + @Override + public boolean levelCompactionDynamicLevelBytes() { + return levelCompactionDynamicLevelBytes(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxBytesForLevelMultiplier(final double multiplier) { + setMaxBytesForLevelMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public double maxBytesForLevelMultiplier() { + return maxBytesForLevelMultiplier(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxCompactionBytes(final long maxCompactionBytes) { + setMaxCompactionBytes(nativeHandle_, maxCompactionBytes); + return this; + } + + @Override + public long maxCompactionBytes() { + return maxCompactionBytes(nativeHandle_); 
+ } + + @Override + public ColumnFamilyOptions setArenaBlockSize( + final long arenaBlockSize) { + setArenaBlockSize(nativeHandle_, arenaBlockSize); + return this; + } + + @Override + public long arenaBlockSize() { + return arenaBlockSize(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setDisableAutoCompactions( + final boolean disableAutoCompactions) { + setDisableAutoCompactions(nativeHandle_, disableAutoCompactions); + return this; + } + + @Override + public boolean disableAutoCompactions() { + return disableAutoCompactions(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setCompactionStyle( + final CompactionStyle compactionStyle) { + setCompactionStyle(nativeHandle_, compactionStyle.getValue()); + return this; + } + + @Override + public CompactionStyle compactionStyle() { + return CompactionStyle.fromValue(compactionStyle(nativeHandle_)); + } + + @Override + public ColumnFamilyOptions setMaxTableFilesSizeFIFO( + final long maxTableFilesSize) { + assert(maxTableFilesSize > 0); // unsigned native type + assert(isOwningHandle()); + setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize); + return this; + } + + @Override + public long maxTableFilesSizeFIFO() { + return maxTableFilesSizeFIFO(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxSequentialSkipInIterations( + final long maxSequentialSkipInIterations) { + setMaxSequentialSkipInIterations(nativeHandle_, + maxSequentialSkipInIterations); + return this; + } + + @Override + public long maxSequentialSkipInIterations() { + return maxSequentialSkipInIterations(nativeHandle_); + } + + @Override + public MemTableConfig memTableConfig() { + return this.memTableConfig_; + } + + @Override + public ColumnFamilyOptions setMemTableConfig( + final MemTableConfig memTableConfig) { + setMemTableFactory( + nativeHandle_, memTableConfig.newMemTableFactoryHandle()); + this.memTableConfig_ = memTableConfig; + return this; + } + + @Override + public String memTableFactoryName() { + assert(isOwningHandle()); + return memTableFactoryName(nativeHandle_); + } + + @Override + public TableFormatConfig tableFormatConfig() { + return this.tableFormatConfig_; + } + + @Override + public ColumnFamilyOptions setTableFormatConfig( + final TableFormatConfig tableFormatConfig) { + setTableFactory(nativeHandle_, tableFormatConfig.newTableFactoryHandle()); + this.tableFormatConfig_ = tableFormatConfig; + return this; + } + + @Override + public String tableFactoryName() { + assert(isOwningHandle()); + return tableFactoryName(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setCfPaths(final Collection cfPaths) { + assert (isOwningHandle()); + + final int len = cfPaths.size(); + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; + + int i = 0; + for (final DbPath dbPath : cfPaths) { + paths[i] = dbPath.path.toString(); + targetSizes[i] = dbPath.targetSize; + i++; + } + setCfPaths(nativeHandle_, paths, targetSizes); + return this; + } + + @Override + public List cfPaths() { + final int len = (int) cfPathsLen(nativeHandle_); + + if (len == 0) { + return Collections.emptyList(); + } + + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; + + cfPaths(nativeHandle_, paths, targetSizes); + + final List cfPaths = new ArrayList<>(); + for (int i = 0; i < len; i++) { + cfPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i])); + } + + return cfPaths; + } + + @Override + public ColumnFamilyOptions setInplaceUpdateSupport( + final boolean 
inplaceUpdateSupport) { + setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport); + return this; + } + + @Override + public boolean inplaceUpdateSupport() { + return inplaceUpdateSupport(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setInplaceUpdateNumLocks( + final long inplaceUpdateNumLocks) { + setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks); + return this; + } + + @Override + public long inplaceUpdateNumLocks() { + return inplaceUpdateNumLocks(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMemtablePrefixBloomSizeRatio( + final double memtablePrefixBloomSizeRatio) { + setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio); + return this; + } + + @Override + public double memtablePrefixBloomSizeRatio() { + return memtablePrefixBloomSizeRatio(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setExperimentalMempurgeThreshold( + final double experimentalMempurgeThreshold) { + setExperimentalMempurgeThreshold(nativeHandle_, experimentalMempurgeThreshold); + return this; + } + + @Override + public double experimentalMempurgeThreshold() { + return experimentalMempurgeThreshold(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMemtableWholeKeyFiltering(final boolean memtableWholeKeyFiltering) { + setMemtableWholeKeyFiltering(nativeHandle_, memtableWholeKeyFiltering); + return this; + } + + @Override + public boolean memtableWholeKeyFiltering() { + return memtableWholeKeyFiltering(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setBloomLocality(int bloomLocality) { + setBloomLocality(nativeHandle_, bloomLocality); + return this; + } + + @Override + public int bloomLocality() { + return bloomLocality(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxSuccessiveMerges( + final long maxSuccessiveMerges) { + setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges); + return this; + } + + @Override + public long maxSuccessiveMerges() { + return maxSuccessiveMerges(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setOptimizeFiltersForHits( + final boolean optimizeFiltersForHits) { + setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits); + return this; + } + + @Override + public boolean optimizeFiltersForHits() { + return optimizeFiltersForHits(nativeHandle_); + } + + @Override + public ColumnFamilyOptions + setMemtableHugePageSize( + long memtableHugePageSize) { + setMemtableHugePageSize(nativeHandle_, + memtableHugePageSize); + return this; + } + + @Override + public long memtableHugePageSize() { + return memtableHugePageSize(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) { + setSoftPendingCompactionBytesLimit(nativeHandle_, + softPendingCompactionBytesLimit); + return this; + } + + @Override + public long softPendingCompactionBytesLimit() { + return softPendingCompactionBytesLimit(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) { + setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit); + return this; + } + + @Override + public long hardPendingCompactionBytesLimit() { + return hardPendingCompactionBytesLimit(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) { + setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger); + return 
this; + } + + @Override + public int level0FileNumCompactionTrigger() { + return level0FileNumCompactionTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) { + setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger); + return this; + } + + @Override + public int level0SlowdownWritesTrigger() { + return level0SlowdownWritesTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevel0StopWritesTrigger(int level0StopWritesTrigger) { + setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger); + return this; + } + + @Override + public int level0StopWritesTrigger() { + return level0StopWritesTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) { + setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional); + return this; + } + + @Override + public int[] maxBytesForLevelMultiplierAdditional() { + return maxBytesForLevelMultiplierAdditional(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setParanoidFileChecks(boolean paranoidFileChecks) { + setParanoidFileChecks(nativeHandle_, paranoidFileChecks); + return this; + } + + @Override + public boolean paranoidFileChecks() { + return paranoidFileChecks(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxWriteBufferNumberToMaintain( + final int maxWriteBufferNumberToMaintain) { + setMaxWriteBufferNumberToMaintain( + nativeHandle_, maxWriteBufferNumberToMaintain); + return this; + } + + @Override + public int maxWriteBufferNumberToMaintain() { + return maxWriteBufferNumberToMaintain(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setCompactionPriority( + final CompactionPriority compactionPriority) { + setCompactionPriority(nativeHandle_, compactionPriority.getValue()); + return this; + } + + @Override + public CompactionPriority compactionPriority() { + return CompactionPriority.getCompactionPriority( + compactionPriority(nativeHandle_)); + } + + @Override + public ColumnFamilyOptions setReportBgIoStats(final boolean reportBgIoStats) { + setReportBgIoStats(nativeHandle_, reportBgIoStats); + return this; + } + + @Override + public boolean reportBgIoStats() { + return reportBgIoStats(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setTtl(final long ttl) { + setTtl(nativeHandle_, ttl); + return this; + } + + @Override + public long ttl() { + return ttl(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setPeriodicCompactionSeconds(final long periodicCompactionSeconds) { + setPeriodicCompactionSeconds(nativeHandle_, periodicCompactionSeconds); + return this; + } + + @Override + public long periodicCompactionSeconds() { + return periodicCompactionSeconds(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setCompactionOptionsUniversal( + final CompactionOptionsUniversal compactionOptionsUniversal) { + setCompactionOptionsUniversal(nativeHandle_, + compactionOptionsUniversal.nativeHandle_); + this.compactionOptionsUniversal_ = compactionOptionsUniversal; + return this; + } + + @Override + public CompactionOptionsUniversal compactionOptionsUniversal() { + return this.compactionOptionsUniversal_; + } + + @Override + public ColumnFamilyOptions setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) { + setCompactionOptionsFIFO(nativeHandle_, + compactionOptionsFIFO.nativeHandle_); + 
this.compactionOptionsFIFO_ = compactionOptionsFIFO; + return this; + } + + @Override + public CompactionOptionsFIFO compactionOptionsFIFO() { + return this.compactionOptionsFIFO_; + } + + @Override + public ColumnFamilyOptions setForceConsistencyChecks(final boolean forceConsistencyChecks) { + setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks); + return this; + } + + @Override + public boolean forceConsistencyChecks() { + return forceConsistencyChecks(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setSstPartitionerFactory(SstPartitionerFactory sstPartitionerFactory) { + setSstPartitionerFactory(nativeHandle_, sstPartitionerFactory.nativeHandle_); + this.sstPartitionerFactory_ = sstPartitionerFactory; + return this; + } + + @Override + public ColumnFamilyOptions setCompactionThreadLimiter( + final ConcurrentTaskLimiter compactionThreadLimiter) { + setCompactionThreadLimiter(nativeHandle_, compactionThreadLimiter.nativeHandle_); + this.compactionThreadLimiter_ = compactionThreadLimiter; + return this; + } + + @Override + public ConcurrentTaskLimiter compactionThreadLimiter() { + assert (isOwningHandle()); + return this.compactionThreadLimiter_; + } + + @Override + public SstPartitionerFactory sstPartitionerFactory() { + return sstPartitionerFactory_; + } + + // + // BEGIN options for blobs (integrated BlobDB) + // + + /** + * When set, large values (blobs) are written to separate blob files, and only + * pointers to them are stored in SST files. This can reduce write amplification + * for large-value use cases at the cost of introducing a level of indirection + * for reads. See also the options min_blob_size, blob_file_size, + * blob_compression_type, enable_blob_garbage_collection, and + * blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param enableBlobFiles true iff blob files should be enabled + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setEnableBlobFiles(final boolean enableBlobFiles) { + setEnableBlobFiles(nativeHandle_, enableBlobFiles); + return this; + } + + /** + * When set, large values (blobs) are written to separate blob files, and only + * pointers to them are stored in SST files. This can reduce write amplification + * for large-value use cases at the cost of introducing a level of indirection + * for reads. See also the options min_blob_size, blob_file_size, + * blob_compression_type, enable_blob_garbage_collection, and + * blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @return true iff blob files are currently enabled + */ + public boolean enableBlobFiles() { + return enableBlobFiles(nativeHandle_); + } + + /** + * Set the size of the smallest value to be stored separately in a blob file. Values + * which have an uncompressed size smaller than this threshold are stored + * alongside the keys in SST files in the usual fashion. A value of zero for + * this option means that all values are stored in blob files. Note that + * enable_blob_files has to be set in order for this option to have any effect. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. 
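By way of illustration, a minimal sketch (not part of this patch) of enabling the integrated BlobDB from RocksJava and then adjusting a blob option at runtime; the database path and size thresholds are assumed example values:

    import org.rocksdb.MutableColumnFamilyOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    public class BlobOptionsSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setEnableBlobFiles(true)     // store large values in blob files
                 .setMinBlobSize(4L * 1024);   // values of 4 KiB and up count as "large"
             final RocksDB db = RocksDB.open(options, "/tmp/blob-example")) {
          // Blob options are dynamically changeable through setOptions():
          db.setOptions(db.getDefaultColumnFamily(),
              MutableColumnFamilyOptions.builder()
                  .setMinBlobSize(64L * 1024)
                  .build());
        }
      }
    }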
+ * + * @param minBlobSize the size of the smallest value to be stored separately in a blob file + * @return these options, updated with the supplied minimum blob size value + */ + @Override + public ColumnFamilyOptions setMinBlobSize(final long minBlobSize) { + setMinBlobSize(nativeHandle_, minBlobSize); + return this; + } + + /** + * Get the size of the smallest value to be stored separately in a blob file. Values + * which have an uncompressed size smaller than this threshold are stored + * alongside the keys in SST files in the usual fashion. A value of zero for + * this option means that all values are stored in blob files. Note that + * enable_blob_files has to be set in order for this option to have any effect. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @return the current minimum blob size + */ + @Override + public long minBlobSize() { + return minBlobSize(nativeHandle_); + } + + /** + * Set the size limit for blob files. When writing blob files, a new file is opened + * once this limit is reached. Note that enable_blob_files has to be set in + * order for this option to have any effect. + * + * Default: 256 MB + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param blobFileSize the new size limit for blob files + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setBlobFileSize(final long blobFileSize) { + setBlobFileSize(nativeHandle_, blobFileSize); + return this; + } + + /** + * Get the size limit for blob files. When writing blob files, a new file is opened + * once this limit is reached. Note that enable_blob_files has to be set in + * order for this option to have any effect. + * + * Default: 256 MB + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @return the size limit for blob files + */ + @Override + public long blobFileSize() { + return blobFileSize(nativeHandle_); + } + + /** + * Set the compression algorithm to use for large values stored in blob files. Note + * that enable_blob_files has to be set in order for this option to have any + * effect. + * + * Default: no compression + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param compressionType the compression algorithm to use + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setBlobCompressionType(final CompressionType compressionType) { + setBlobCompressionType(nativeHandle_, compressionType.getValue()); + return this; + } + + /** + * Get the compression algorithm to use for large values stored in blob files. Note + * that enable_blob_files has to be set in order for this option to have any + * effect. + * + * Default: no compression + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @return the compression algorithm currently in use for blobs + */ + @Override + public CompressionType blobCompressionType() { + return CompressionType.values()[blobCompressionType(nativeHandle_)]; + } + + /** + * Enable/disable garbage collection of blobs. Blob GC is performed as part of + * compaction. 
Valid blobs residing in blob files older than a cutoff get + * relocated to new files as they are encountered during compaction, which makes + * it possible to clean up blob files once they contain nothing but + * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * @param enableBlobGarbageCollection true iff blob garbage collection is to be enabled + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setEnableBlobGarbageCollection( + final boolean enableBlobGarbageCollection) { + setEnableBlobGarbageCollection(nativeHandle_, enableBlobGarbageCollection); + return this; + } + + /** + * Get enabled/disables state for garbage collection of blobs. Blob GC is performed as part of + * compaction. Valid blobs residing in blob files older than a cutoff get + * relocated to new files as they are encountered during compaction, which makes + * it possible to clean up blob files once they contain nothing but + * obsolete/garbage blobs. See also blob_garbage_collection_age_cutoff below. + * + * Default: false + * + * @return true iff blob garbage collection is currently enabled + */ + @Override + public boolean enableBlobGarbageCollection() { + return enableBlobGarbageCollection(nativeHandle_); + } + + /** + * Set the cutoff in terms of blob file age for garbage collection. Blobs in the + * oldest N blob files will be relocated when encountered during compaction, + * where N = garbage_collection_cutoff * number_of_blob_files. Note that + * enable_blob_garbage_collection has to be set in order for this option to have + * any effect. + * + * Default: 0.25 + * + * @param blobGarbageCollectionAgeCutoff the new blob garbage collection age cutoff + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setBlobGarbageCollectionAgeCutoff( + final double blobGarbageCollectionAgeCutoff) { + setBlobGarbageCollectionAgeCutoff(nativeHandle_, blobGarbageCollectionAgeCutoff); + return this; + } + + /** + * Get the cutoff in terms of blob file age for garbage collection. Blobs in the + * oldest N blob files will be relocated when encountered during compaction, + * where N = garbage_collection_cutoff * number_of_blob_files. Note that + * enable_blob_garbage_collection has to be set in order for this option to have + * any effect. + * + * Default: 0.25 + * + * @return the current blob garbage collection age cutoff + */ + @Override + public double blobGarbageCollectionAgeCutoff() { + return blobGarbageCollectionAgeCutoff(nativeHandle_); + } + + /** + * If the ratio of garbage in the oldest blob files exceeds this threshold, + * targeted compactions are scheduled in order to force garbage collecting + * the blob files in question, assuming they are all eligible based on the + * value of {@link #blobGarbageCollectionAgeCutoff} above. This option is + * currently only supported with leveled compactions. + * + * Note that {@link #enableBlobGarbageCollection} has to be set in order for this + * option to have any effect. 
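To make the cutoff arithmetic concrete: with the default cutoff of 0.25 and, say, 100 blob files, blobs in the oldest 25 files are rewritten as compaction encounters them. A hedged configuration sketch (the 0.75 force threshold below is an arbitrary example, not a recommendation):

    import org.rocksdb.ColumnFamilyOptions;

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setEnableBlobFiles(true)                      // prerequisite for blob GC
        .setEnableBlobGarbageCollection(true)
        .setBlobGarbageCollectionAgeCutoff(0.25)       // GC the oldest 25% of blob files
        .setBlobGarbageCollectionForceThreshold(0.75); // example value; default is 1.0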
+ * + * Default: 1.0 + * + * Dynamically changeable through the SetOptions() API + * + * @param blobGarbageCollectionForceThreshold new value for the threshold + * @return the reference to the current options + */ + @Override + public ColumnFamilyOptions setBlobGarbageCollectionForceThreshold( + final double blobGarbageCollectionForceThreshold) { + setBlobGarbageCollectionForceThreshold(nativeHandle_, blobGarbageCollectionForceThreshold); + return this; + } + + /** + * Get the current value for the {@link #blobGarbageCollectionForceThreshold} + * @return the current threshold at which garbage collection of blobs is forced + */ + @Override + public double blobGarbageCollectionForceThreshold() { + return blobGarbageCollectionForceThreshold(nativeHandle_); + } + + /** + * Set compaction readahead for blob files. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param blobCompactionReadaheadSize the compaction readahead for blob files + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setBlobCompactionReadaheadSize( + final long blobCompactionReadaheadSize) { + setBlobCompactionReadaheadSize(nativeHandle_, blobCompactionReadaheadSize); + return this; + } + + /** + * Get compaction readahead for blob files. + * + * @return the current compaction readahead for blob files + */ + @Override + public long blobCompactionReadaheadSize() { + return blobCompactionReadaheadSize(nativeHandle_); + } + + /** + * Set a certain LSM tree level to enable blob files. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param blobFileStartingLevel the starting level to enable blob files + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setBlobFileStartingLevel(final int blobFileStartingLevel) { + setBlobFileStartingLevel(nativeHandle_, blobFileStartingLevel); + return this; + } + + /** + * Get the starting LSM tree level to enable blob files. + * + * Default: 0 + * + * @return the current LSM tree level to enable blob files. + */ + @Override + public int blobFileStartingLevel() { + return blobFileStartingLevel(nativeHandle_); + } + + /** + * Set a certain prepopulate blob cache option. + * + * Default: 0 + * + * Dynamically changeable through + * {@link RocksDB#setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}. + * + * @param prepopulateBlobCache the prepopulate blob cache option + * + * @return the reference to the current options. + */ + @Override + public ColumnFamilyOptions setPrepopulateBlobCache( + final PrepopulateBlobCache prepopulateBlobCache) { + setPrepopulateBlobCache(nativeHandle_, prepopulateBlobCache.getValue()); + return this; + } + + /** + * Get the prepopulate blob cache option. + * + * Default: 0 + * + * @return the current prepopulate blob cache option. 
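A short sketch of the two options above, assuming the PREPOPULATE_BLOB_FLUSH_ONLY constant of the PrepopulateBlobCache enum; level 2 is an arbitrary example:

    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.PrepopulateBlobCache;

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setEnableBlobFiles(true)
        .setBlobFileStartingLevel(2)  // keep values inline in levels 0 and 1
        .setPrepopulateBlobCache(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY);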
+ */ + @Override + public PrepopulateBlobCache prepopulateBlobCache() { + return PrepopulateBlobCache.getPrepopulateBlobCache(prepopulateBlobCache(nativeHandle_)); + } + + // + // END options for blobs (integrated BlobDB) + // + + private static native long getColumnFamilyOptionsFromProps( + final long cfgHandle, String optString); + private static native long getColumnFamilyOptionsFromProps(final String optString); + + private static native long newColumnFamilyOptions(); + private static native long copyColumnFamilyOptions(final long handle); + private static native long newColumnFamilyOptionsFromOptions( + final long optionsHandle); + @Override protected final native void disposeInternal(final long handle); + + private static native void oldDefaults( + final long handle, final int majorVersion, final int minorVersion); + private native void optimizeForSmallDb(final long handle); + private static native void optimizeForSmallDb(final long handle, final long cacheHandle); + private native void optimizeForPointLookup(long handle, + long blockCacheSizeMb); + private native void optimizeLevelStyleCompaction(long handle, + long memtableMemoryBudget); + private native void optimizeUniversalStyleCompaction(long handle, + long memtableMemoryBudget); + private native void setComparatorHandle(long handle, int builtinComparator); + private native void setComparatorHandle(long optHandle, + long comparatorHandle, byte comparatorType); + private native void setMergeOperatorName(long handle, String name); + private native void setMergeOperator(long handle, long mergeOperatorHandle); + private native void setCompactionFilterHandle(long handle, + long compactionFilterHandle); + private native void setCompactionFilterFactoryHandle(long handle, + long compactionFilterFactoryHandle); + private native void setWriteBufferSize(long handle, long writeBufferSize) + throws IllegalArgumentException; + private native long writeBufferSize(long handle); + private native void setMaxWriteBufferNumber( + long handle, int maxWriteBufferNumber); + private native int maxWriteBufferNumber(long handle); + private native void setMinWriteBufferNumberToMerge( + long handle, int minWriteBufferNumberToMerge); + private native int minWriteBufferNumberToMerge(long handle); + private native void setCompressionType(long handle, byte compressionType); + private native byte compressionType(long handle); + private native void setCompressionPerLevel(long handle, + byte[] compressionLevels); + private native byte[] compressionPerLevel(long handle); + private native void setBottommostCompressionType(long handle, + byte bottommostCompressionType); + private native byte bottommostCompressionType(long handle); + private native void setBottommostCompressionOptions(final long handle, + final long bottommostCompressionOptionsHandle); + private native void setCompressionOptions(long handle, + long compressionOptionsHandle); + private native void useFixedLengthPrefixExtractor( + long handle, int prefixLength); + private native void useCappedPrefixExtractor( + long handle, int prefixLength); + private native void setNumLevels( + long handle, int numLevels); + private native int numLevels(long handle); + private native void setLevelZeroFileNumCompactionTrigger( + long handle, int numFiles); + private native int levelZeroFileNumCompactionTrigger(long handle); + private native void setLevelZeroSlowdownWritesTrigger( + long handle, int numFiles); + private native int levelZeroSlowdownWritesTrigger(long handle); + private native void 
setLevelZeroStopWritesTrigger( + long handle, int numFiles); + private native int levelZeroStopWritesTrigger(long handle); + private native void setTargetFileSizeBase( + long handle, long targetFileSizeBase); + private native long targetFileSizeBase(long handle); + private native void setTargetFileSizeMultiplier( + long handle, int multiplier); + private native int targetFileSizeMultiplier(long handle); + private native void setMaxBytesForLevelBase( + long handle, long maxBytesForLevelBase); + private native long maxBytesForLevelBase(long handle); + private native void setLevelCompactionDynamicLevelBytes( + long handle, boolean enableLevelCompactionDynamicLevelBytes); + private native boolean levelCompactionDynamicLevelBytes( + long handle); + private native void setMaxBytesForLevelMultiplier(long handle, double multiplier); + private native double maxBytesForLevelMultiplier(long handle); + private native void setMaxCompactionBytes(long handle, long maxCompactionBytes); + private native long maxCompactionBytes(long handle); + private native void setArenaBlockSize( + long handle, long arenaBlockSize) + throws IllegalArgumentException; + private native long arenaBlockSize(long handle); + private native void setDisableAutoCompactions( + long handle, boolean disableAutoCompactions); + private native boolean disableAutoCompactions(long handle); + private native void setCompactionStyle(long handle, byte compactionStyle); + private native byte compactionStyle(long handle); + private native void setMaxTableFilesSizeFIFO( + long handle, long max_table_files_size); + private native long maxTableFilesSizeFIFO(long handle); + private native void setMaxSequentialSkipInIterations( + long handle, long maxSequentialSkipInIterations); + private native long maxSequentialSkipInIterations(long handle); + private native void setMemTableFactory(long handle, long factoryHandle); + private native String memTableFactoryName(long handle); + private native void setTableFactory(long handle, long factoryHandle); + private native String tableFactoryName(long handle); + private static native void setCfPaths( + final long handle, final String[] paths, final long[] targetSizes); + private static native long cfPathsLen(final long handle); + private static native void cfPaths( + final long handle, final String[] paths, final long[] targetSizes); + private native void setInplaceUpdateSupport( + long handle, boolean inplaceUpdateSupport); + private native boolean inplaceUpdateSupport(long handle); + private native void setInplaceUpdateNumLocks( + long handle, long inplaceUpdateNumLocks) + throws IllegalArgumentException; + private native long inplaceUpdateNumLocks(long handle); + private native void setMemtablePrefixBloomSizeRatio( + long handle, double memtablePrefixBloomSizeRatio); + private native double memtablePrefixBloomSizeRatio(long handle); + private native void setExperimentalMempurgeThreshold( + long handle, double experimentalMempurgeThreshold); + private native double experimentalMempurgeThreshold(long handle); + private native void setMemtableWholeKeyFiltering(long handle, boolean memtableWholeKeyFiltering); + private native boolean memtableWholeKeyFiltering(long handle); + private native void setBloomLocality( + long handle, int bloomLocality); + private native int bloomLocality(long handle); + private native void setMaxSuccessiveMerges( + long handle, long maxSuccessiveMerges) + throws IllegalArgumentException; + private native long maxSuccessiveMerges(long handle); + private native void 
setOptimizeFiltersForHits(long handle, + boolean optimizeFiltersForHits); + private native boolean optimizeFiltersForHits(long handle); + private native void setMemtableHugePageSize(long handle, + long memtableHugePageSize); + private native long memtableHugePageSize(long handle); + private native void setSoftPendingCompactionBytesLimit(long handle, + long softPendingCompactionBytesLimit); + private native long softPendingCompactionBytesLimit(long handle); + private native void setHardPendingCompactionBytesLimit(long handle, + long hardPendingCompactionBytesLimit); + private native long hardPendingCompactionBytesLimit(long handle); + private native void setLevel0FileNumCompactionTrigger(long handle, + int level0FileNumCompactionTrigger); + private native int level0FileNumCompactionTrigger(long handle); + private native void setLevel0SlowdownWritesTrigger(long handle, + int level0SlowdownWritesTrigger); + private native int level0SlowdownWritesTrigger(long handle); + private native void setLevel0StopWritesTrigger(long handle, + int level0StopWritesTrigger); + private native int level0StopWritesTrigger(long handle); + private native void setMaxBytesForLevelMultiplierAdditional(long handle, + int[] maxBytesForLevelMultiplierAdditional); + private native int[] maxBytesForLevelMultiplierAdditional(long handle); + private native void setParanoidFileChecks(long handle, + boolean paranoidFileChecks); + private native boolean paranoidFileChecks(long handle); + private native void setMaxWriteBufferNumberToMaintain(final long handle, + final int maxWriteBufferNumberToMaintain); + private native int maxWriteBufferNumberToMaintain(final long handle); + private native void setCompactionPriority(final long handle, + final byte compactionPriority); + private native byte compactionPriority(final long handle); + private native void setReportBgIoStats(final long handle, + final boolean reportBgIoStats); + private native boolean reportBgIoStats(final long handle); + private native void setTtl(final long handle, final long ttl); + private native long ttl(final long handle); + private native void setPeriodicCompactionSeconds( + final long handle, final long periodicCompactionSeconds); + private native long periodicCompactionSeconds(final long handle); + private native void setCompactionOptionsUniversal(final long handle, + final long compactionOptionsUniversalHandle); + private native void setCompactionOptionsFIFO(final long handle, + final long compactionOptionsFIFOHandle); + private native void setForceConsistencyChecks(final long handle, + final boolean forceConsistencyChecks); + private native boolean forceConsistencyChecks(final long handle); + private native void setSstPartitionerFactory(long nativeHandle_, long newFactoryHandle); + private static native void setCompactionThreadLimiter( + final long nativeHandle_, final long compactionThreadLimiterHandle); + + private native void setEnableBlobFiles(final long nativeHandle_, final boolean enableBlobFiles); + private native boolean enableBlobFiles(final long nativeHandle_); + private native void setMinBlobSize(final long nativeHandle_, final long minBlobSize); + private native long minBlobSize(final long nativeHandle_); + private native void setBlobFileSize(final long nativeHandle_, final long blobFileSize); + private native long blobFileSize(final long nativeHandle_); + private native void setBlobCompressionType(final long nativeHandle_, final byte compressionType); + private native byte blobCompressionType(final long nativeHandle_); + private native void 
setEnableBlobGarbageCollection( + final long nativeHandle_, final boolean enableBlobGarbageCollection); + private native boolean enableBlobGarbageCollection(final long nativeHandle_); + private native void setBlobGarbageCollectionAgeCutoff( + final long nativeHandle_, final double blobGarbageCollectionAgeCutoff); + private native double blobGarbageCollectionAgeCutoff(final long nativeHandle_); + private native void setBlobGarbageCollectionForceThreshold( + final long nativeHandle_, final double blobGarbageCollectionForceThreshold); + private native double blobGarbageCollectionForceThreshold(final long nativeHandle_); + private native void setBlobCompactionReadaheadSize( + final long nativeHandle_, final long blobCompactionReadaheadSize); + private native long blobCompactionReadaheadSize(final long nativeHandle_); + private native void setBlobFileStartingLevel( + final long nativeHandle_, final int blobFileStartingLevel); + private native int blobFileStartingLevel(final long nativeHandle_); + private native void setPrepopulateBlobCache( + final long nativeHandle_, final byte prepopulateBlobCache); + private native byte prepopulateBlobCache(final long nativeHandle_); + + // instance variables + // NOTE: If you add new member variables, please update the copy constructor above! + private MemTableConfig memTableConfig_; + private TableFormatConfig tableFormatConfig_; + private AbstractComparator comparator_; + private AbstractCompactionFilter<? super AbstractSlice<?>> compactionFilter_; + private AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> + compactionFilterFactory_; + private CompactionOptionsUniversal compactionOptionsUniversal_; + private CompactionOptionsFIFO compactionOptionsFIFO_; + private CompressionOptions bottommostCompressionOptions_; + private CompressionOptions compressionOptions_; + private SstPartitionerFactory sstPartitionerFactory_; + private ConcurrentTaskLimiter compactionThreadLimiter_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java new file mode 100644 index 000000000..97357aacf --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -0,0 +1,536 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Collection; +import java.util.List; + +public interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInterface<T>> + extends AdvancedColumnFamilyOptionsInterface<T> { + /** + * The function recovers options to a previous version. Only 4.6 or later + * versions are supported. + * + * @param majorVersion The major version to recover default values of options + * @param minorVersion The minor version to recover default values of options + * @return the instance of the current object. + */ + T oldDefaults(int majorVersion, int minorVersion); + + /** + * Use this if your DB is very small (like under 1GB) and you don't want to + * spend lots of memory for memtables. + * + * @return the instance of the current object. + */ + T optimizeForSmallDb(); + + /** + * Some functions that make it easier to optimize RocksDB + * Use this if your DB is very small (like under 1GB) and you don't want to + * spend lots of memory for memtables.
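As a sketch, the small-DB preset can share one block cache across column families; the 8 MiB capacity is an assumed example value:

    import org.rocksdb.Cache;
    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.LRUCache;

    try (final Cache cache = new LRUCache(8 * 1024 * 1024);
         final ColumnFamilyOptions small =
             new ColumnFamilyOptions().optimizeForSmallDb(cache)) {
      // use `small` in the ColumnFamilyDescriptors of a small database
    }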
+ * + * @param cache An optional cache object is passed in to be used as the block cache + * @return the instance of the current object. + */ + T optimizeForSmallDb(Cache cache); + + /** + * Use this if you don't need to keep the data sorted, i.e. you'll never use + * an iterator, only Put() and Get() API calls + * + * @param blockCacheSizeMb Block cache size in MB + * @return the instance of the current object. + */ + T optimizeForPointLookup(long blockCacheSizeMb); + + /** + *
<p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for level style compaction.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period</p>
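For instance, a sketch of applying the preset together with increased background parallelism (four threads is an assumption about the host):

    import org.rocksdb.Options;

    final Options options = new Options()
        .setCreateIfMissing(true)
        .setIncreaseParallelism(4)        // Java counterpart of IncreaseParallelism()
        .optimizeLevelStyleCompaction();  // uses the default memtable memory budget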
+ * + * @return the instance of the current object. + */ + T optimizeLevelStyleCompaction(); + + /** + *
<p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for level style compaction.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period</p>
+ * + * @param memtableMemoryBudget memory budget in bytes + * @return the instance of the current object. + */ + T optimizeLevelStyleCompaction( + long memtableMemoryBudget); + + /** + *
<p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for universal style compaction.</p> + * + * <p>Universal style compaction is focused on reducing Write Amplification + * Factor for big data sets, but increases Space Amplification.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period</p>
+ * + * @return the instance of the current object. + */ + T optimizeUniversalStyleCompaction(); + + /** + *
<p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for universal style compaction.</p> + * + * <p>Universal style compaction is focused on reducing Write Amplification + * Factor for big data sets, but increases Space Amplification.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period</p>
+ * + * @param memtableMemoryBudget memory budget in bytes + * @return the instance of the current object. + */ + T optimizeUniversalStyleCompaction( + long memtableMemoryBudget); + + /** + * Set {@link BuiltinComparator} to be used with RocksDB. + * + * Note: Comparator can be set once upon database creation. + * + * Default: BytewiseComparator. + * @param builtinComparator a {@link BuiltinComparator} type. + * @return the instance of the current object. + */ + T setComparator( + BuiltinComparator builtinComparator); + + /** + * Use the specified comparator for key ordering. + * + * Comparator should not be disposed before options instances using this comparator is + * disposed. If dispose() function is not called, then comparator object will be + * GC'd automatically. + * + * Comparator instance can be re-used in multiple options instances. + * + * @param comparator java instance. + * @return the instance of the current object. + */ + T setComparator( + AbstractComparator comparator); + + /** + *
<p>Set the merge operator to be used for merging two merge operands + * of the same key. The merge function is invoked during + * compaction and at lookup time, if multiple key/value pairs belonging + * to the same key are found in the database.</p>
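A hedged sketch of selecting one of the named built-in operators (listed below) and issuing merges; the database path and keys are illustrative:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;

    static void mergeSketch() throws RocksDBException {
      try (final Options options = new Options()
               .setCreateIfMissing(true)
               .setMergeOperatorName("stringappend");  // built-in operator, by name
           final RocksDB db = RocksDB.open(options, "/tmp/merge-example")) {
        db.merge("k".getBytes(), "a".getBytes());
        db.merge("k".getBytes(), "b".getBytes());
        // "k" now reads back as "a,b" (stringappend's default delimiter is ',')
      }
    }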
+ * + * @param name the name of the merge function, as defined by + * the MergeOperators factory (see utilities/MergeOperators.h) + * The merge function is specified by name and must be one of the + * standard merge operators provided by RocksDB. The available + * operators are "put", "uint64add", "stringappend" and "stringappendtest". + * @return the instance of the current object. + */ + T setMergeOperatorName(String name); + + /** + *
<p>Set the merge operator to be used for merging two different key/value + * pairs that share the same key. The merge function is invoked during + * compaction and at lookup time, if multiple key/value pairs belonging + * to the same key are found in the database.</p>
+ * + * @param mergeOperator {@link MergeOperator} instance. + * @return the instance of the current object. + */ + T setMergeOperator(MergeOperator mergeOperator); + + /** + * A single CompactionFilter instance to call into during compaction. + * Allows an application to modify/delete a key-value during background + * compaction. + * + * If the client requires a new compaction filter to be used for different + * compaction runs, it can instead call + * {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)}. + * + * The client should only set one of the two. + * {@link #setCompactionFilter(AbstractCompactionFilter)} takes precedence + * over {@link #setCompactionFilterFactory(AbstractCompactionFilterFactory)} + * if the client specifies both. + * + * If multithreaded compaction is being used, the supplied CompactionFilter + * instance may be used from different threads concurrently and so should be thread-safe. + * + * @param compactionFilter {@link AbstractCompactionFilter} instance. + * @return the instance of the current object. + */ + T setCompactionFilter( + final AbstractCompactionFilter<? super AbstractSlice<?>> compactionFilter); + + /** + * Accessor for the CompactionFilter instance in use. + * + * @return Reference to the CompactionFilter, or null if one hasn't been set. + */ + AbstractCompactionFilter<? super AbstractSlice<?>> compactionFilter(); + + /** + * This is a factory that provides {@link AbstractCompactionFilter} objects + * which allow an application to modify/delete a key-value during background + * compaction. + * + * A new filter will be created on each compaction run. If multithreaded + * compaction is being used, each created CompactionFilter will only be used + * from a single thread and so does not need to be thread-safe. + * + * @param compactionFilterFactory {@link AbstractCompactionFilterFactory} instance. + * @return the instance of the current object. + */ + T setCompactionFilterFactory( + final AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> + compactionFilterFactory); + + /** + * Accessor for the CompactionFilterFactory instance in use. + * + * @return Reference to the CompactionFilterFactory, or null if one hasn't been set. + */ + AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> compactionFilterFactory(); + + /** + * This prefix-extractor uses the first n bytes of a key as its prefix. + * + * In some hash-based memtable representations such as HashLinkedList + * and HashSkipList, prefixes are used to partition the keys into + * several buckets. Prefix extractor is used to specify how to + * extract the prefix given a key. + * + * @param n use the first n bytes of a key as its prefix. + * @return the reference to the current option. + */ + T useFixedLengthPrefixExtractor(int n); + + /** + * Same as the fixed length prefix extractor, except that when the slice is + * shorter than the fixed length, it will use the full key. + * + * @param n use the first n bytes of a key as its prefix. + * @return the reference to the current option. + */ + T useCappedPrefixExtractor(int n); + + /** + * Number of files to trigger level-0 compaction. A value < 0 means that + * level-0 compaction will not be triggered by number of files at all. + * Default: 4 + * + * @param numFiles the number of files in level-0 to trigger compaction. + * @return the reference to the current option. + */ + T setLevelZeroFileNumCompactionTrigger( + int numFiles); + + /** + * The number of files in level 0 to trigger compaction from level-0 to + * level-1.
A value < 0 means that level-0 compaction will not be + * triggered by number of files at all. + * Default: 4 + * + * @return the number of files in level 0 to trigger compaction. + */ + int levelZeroFileNumCompactionTrigger(); + + /** + * Soft limit on number of level-0 files. We start slowing down writes at this + * point. A value < 0 means that no writing slow down will be triggered by + * number of files in level-0. + * + * @param numFiles soft limit on number of level-0 files. + * @return the reference to the current option. + */ + T setLevelZeroSlowdownWritesTrigger( + int numFiles); + + /** + * Soft limit on the number of level-0 files. We start slowing down writes + * at this point. A value < 0 means that no writing slow down will be + * triggered by number of files in level-0. + * + * @return the soft limit on the number of level-0 files. + */ + int levelZeroSlowdownWritesTrigger(); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @param numFiles the hard limit of the number of level-0 files. + * @return the reference to the current option. + */ + T setLevelZeroStopWritesTrigger(int numFiles); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @return the hard limit of the number of level-0 file. + */ + int levelZeroStopWritesTrigger(); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @param multiplier the ratio between the total size of level-(L+1) + * files and the total size of level-L files for all L. + * @return the reference to the current option. + */ + T setMaxBytesForLevelMultiplier( + double multiplier); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @return the ratio between the total size of level-(L+1) files and + * the total size of level-L files for all L. + */ + double maxBytesForLevelMultiplier(); + + /** + * FIFO compaction option. + * The oldest table file will be deleted + * once the sum of table files reaches this size. + * The default value is 1GB (1 * 1024 * 1024 * 1024). + * + * @param maxTableFilesSize the size limit of the total sum of table files. + * @return the instance of the current object. + */ + T setMaxTableFilesSizeFIFO( + long maxTableFilesSize); + + /** + * FIFO compaction option. + * The oldest table file will be deleted + * once the sum of table files reaches this size. + * The default value is 1GB (1 * 1024 * 1024 * 1024). + * + * @return the size limit of the total sum of table files. + */ + long maxTableFilesSizeFIFO(); + + /** + * Get the config for mem-table. + * + * @return the mem-table config. + */ + MemTableConfig memTableConfig(); + + /** + * Set the config for mem-table. + * + * @param memTableConfig the mem-table config. + * @return the instance of the current object. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + T setMemTableConfig(MemTableConfig memTableConfig); + + /** + * Returns the name of the current mem table representation. + * Memtable format can be set using setTableFormatConfig. + * + * @return the name of the currently-used memtable factory. + * @see #setTableFormatConfig(org.rocksdb.TableFormatConfig) + */ + String memTableFactoryName(); + + /** + * Get the config for table format. + * + * @return the table format config. 
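The three level-0 thresholds are usually tuned together; a sketch with illustrative values:

    import org.rocksdb.ColumnFamilyOptions;

    final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setLevelZeroFileNumCompactionTrigger(4)  // begin compacting L0 at 4 files
        .setLevelZeroSlowdownWritesTrigger(20)    // start throttling writes
        .setLevelZeroStopWritesTrigger(36);       // hard stop for writes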
+ */ + TableFormatConfig tableFormatConfig(); + + /** + * Set the config for table format. + * + * @param config the table format config. + * @return the reference of the current options. + */ + T setTableFormatConfig(TableFormatConfig config); + + /** + * @return the name of the currently used table factory. + */ + String tableFactoryName(); + + /** + * A list of paths where SST files for this column family + * can be placed, each with its target size. Similar to db_paths, + * newer data is placed into paths specified earlier in the + * vector while older data gradually moves to paths specified + * later in the vector. + * Note that, if a path is supplied to multiple column + * families, it would have files and total size from all + * the column families combined. The user should provision for the + * total size (from all the column families) in such cases. + * + * If left empty, db_paths will be used. + * Default: empty + * + * @param paths collection of paths for SST files. + * @return the reference of the current options. + */ + T setCfPaths(final Collection<DbPath> paths); + + /** + * @return collection of paths for SST files. + */ + List<DbPath> cfPaths(); + + /** + * Compression algorithm that will be used for the bottommost level that + * contains files. If level-compaction is used, this option will only affect + * levels after base level. + * + * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} + * + * @param bottommostCompressionType The compression type to use for the + * bottommost level + * + * @return the reference of the current options. + */ + T setBottommostCompressionType( + final CompressionType bottommostCompressionType); + + /** + * Compression algorithm that will be used for the bottommost level that + * contains files. If level-compaction is used, this option will only affect + * levels after base level. + * + * Default: {@link CompressionType#DISABLE_COMPRESSION_OPTION} + * + * @return The compression type used for the bottommost level + */ + CompressionType bottommostCompressionType(); + + /** + * Set the options for compression algorithms used by + * {@link #bottommostCompressionType()} if it is enabled. + * + * To enable it, please see the definition of + * {@link CompressionOptions}. + * + * @param compressionOptions the bottom most compression options. + * + * @return the reference of the current options. + */ + T setBottommostCompressionOptions( + final CompressionOptions compressionOptions); + + /** + * Get the bottom most compression options. + * + * See {@link #setBottommostCompressionOptions(CompressionOptions)}. + * + * @return the bottom most compression options. + */ + CompressionOptions bottommostCompressionOptions(); + + /** + * Set the different options for compression algorithms + * + * @param compressionOptions The compression options + * + * @return the reference of the current options. + */ + T setCompressionOptions( + CompressionOptions compressionOptions); + + /** + * Get the different options for compression algorithms + * + * @return The compression options + */ + CompressionOptions compressionOptions(); + + /** + * If non-nullptr, use the specified factory for a function to determine the + * partitioning of sst files. This helps compaction to split the files + * on interesting boundaries (key prefixes) to make propagation of sst + * files less write amplifying (covering the whole key space). + * + * Default: nullptr + * + * @param factory The factory reference + * @return the reference of the current options.
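A sketch using the fixed-prefix partitioner that ships with RocksJava; the 8-byte prefix length is an assumed example:

    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.SstPartitionerFixedPrefixFactory;

    try (final SstPartitionerFixedPrefixFactory byPrefix =
             new SstPartitionerFixedPrefixFactory(8);
         final ColumnFamilyOptions cfOpts =
             new ColumnFamilyOptions().setSstPartitionerFactory(byPrefix)) {
      // compaction will cut SST files at 8-byte key-prefix boundaries
    }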
+ */ + @Experimental("Caution: this option is experimental") + T setSstPartitionerFactory(SstPartitionerFactory factory); + + /** + * Get SST partitioner factory + * + * @return SST partitioner factory + */ + @Experimental("Caution: this option is experimental") + SstPartitionerFactory sstPartitionerFactory(); + + /** + * Compaction concurrent thread limiter for the column family. + * If non-nullptr, use given concurrent thread limiter to control + * the max outstanding compaction tasks. Limiter can be shared with + * multiple column families across db instances. + * + * @param concurrentTaskLimiter The compaction thread limiter. + * @return the reference of the current options. + */ + T setCompactionThreadLimiter(ConcurrentTaskLimiter concurrentTaskLimiter); + + /** + * Get compaction thread limiter + * + * @return Compaction thread limiter + */ + ConcurrentTaskLimiter compactionThreadLimiter(); + + /** + * Default memtable memory budget used with the following methods: + * + *
<ol> + *   <li>{@link #optimizeLevelStyleCompaction()}</li> + *   <li>{@link #optimizeUniversalStyleCompaction()}</li> + * </ol>
+ */ + long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET = 512 * 1024 * 1024; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java new file mode 100644 index 000000000..da023d366 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactRangeOptions.java @@ -0,0 +1,238 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * CompactRangeOptions is used by CompactRange() call. In the documentation of the methods "the compaction" refers to + * any compaction that is using this CompactRangeOptions. + */ +public class CompactRangeOptions extends RocksObject { + + private final static byte VALUE_kSkip = 0; + private final static byte VALUE_kIfHaveCompactionFilter = 1; + private final static byte VALUE_kForce = 2; + + // For level based compaction, we can configure if we want to skip/force bottommost level + // compaction. The order of this enum MUST follow the C++ layer. See BottommostLevelCompaction in + // db/options.h + public enum BottommostLevelCompaction { + /** + * Skip bottommost level compaction + */ + kSkip(VALUE_kSkip), + /** + * Only compact bottommost level if there is a compaction filter. This is the default option + */ + kIfHaveCompactionFilter(VALUE_kIfHaveCompactionFilter), + /** + * Always compact bottommost level + */ + kForce(VALUE_kForce); + + private final byte value; + + BottommostLevelCompaction(final byte value) { + this.value = value; + } + + /** + *
<p>Returns the byte value of the enumeration's value.</p>
+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Returns the BottommostLevelCompaction for the given C++ rocks enum value. + * @param bottommostLevelCompaction The value of the BottommostLevelCompaction + * @return BottommostLevelCompaction instance, or null if none matches + */ + public static BottommostLevelCompaction fromRocksId(final int bottommostLevelCompaction) { + switch (bottommostLevelCompaction) { + case VALUE_kSkip: return kSkip; + case VALUE_kIfHaveCompactionFilter: return kIfHaveCompactionFilter; + case VALUE_kForce: return kForce; + default: return null; + } + } + } + + /** + * Construct CompactRangeOptions. + */ + public CompactRangeOptions() { + super(newCompactRangeOptions()); + } + + /** + * Returns whether the compaction is exclusive or other compactions may run concurrently at the same time. + * + * @return true if exclusive, false if concurrent + */ + public boolean exclusiveManualCompaction() { + return exclusiveManualCompaction(nativeHandle_); + } + + /** + * Sets whether the compaction is exclusive or other compaction are allowed run concurrently at the same time. + * + * @param exclusiveCompaction true if compaction should be exclusive + * @return This CompactRangeOptions + */ + public CompactRangeOptions setExclusiveManualCompaction(final boolean exclusiveCompaction) { + setExclusiveManualCompaction(nativeHandle_, exclusiveCompaction); + return this; + } + + /** + * Returns whether compacted files will be moved to the minimum level capable of holding the data or given level + * (specified non-negative target_level). + * @return true, if compacted files will be moved to the minimum level + */ + public boolean changeLevel() { + return changeLevel(nativeHandle_); + } + + /** + * Whether compacted files will be moved to the minimum level capable of holding the data or given level + * (specified non-negative target_level). + * + * @param changeLevel If true, compacted files will be moved to the minimum level + * @return This CompactRangeOptions + */ + public CompactRangeOptions setChangeLevel(final boolean changeLevel) { + setChangeLevel(nativeHandle_, changeLevel); + return this; + } + + /** + * If change_level is true and target_level have non-negative value, compacted files will be moved to target_level. + * @return The target level for the compacted files + */ + public int targetLevel() { + return targetLevel(nativeHandle_); + } + + + /** + * If change_level is true and target_level have non-negative value, compacted files will be moved to target_level. + * + * @param targetLevel target level for the compacted files + * @return This CompactRangeOptions + */ + public CompactRangeOptions setTargetLevel(final int targetLevel) { + setTargetLevel(nativeHandle_, targetLevel); + return this; + } + + /** + * target_path_id for compaction output. Compaction outputs will be placed in options.db_paths[target_path_id]. + * + * @return target_path_id + */ + public int targetPathId() { + return targetPathId(nativeHandle_); + } + + /** + * Compaction outputs will be placed in options.db_paths[target_path_id]. Behavior is undefined if target_path_id is + * out of range. 
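Putting the pieces together, a hedged sketch of a full-range manual compaction that forces the bottommost level; `db` stands for an already-open RocksDB handle:

    import org.rocksdb.CompactRangeOptions;
    import org.rocksdb.CompactRangeOptions.BottommostLevelCompaction;

    try (final CompactRangeOptions cro = new CompactRangeOptions()
             .setExclusiveManualCompaction(true)
             .setBottommostLevelCompaction(BottommostLevelCompaction.kForce)) {
      // null begin/end keys compact the whole key range of the column family
      db.compactRange(db.getDefaultColumnFamily(), null, null, cro);
    }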
+ * + * @param targetPathId target path id + * @return This CompactRangeOptions + */ + public CompactRangeOptions setTargetPathId(final int targetPathId) { + setTargetPathId(nativeHandle_, targetPathId); + return this; + } + + /** + * Returns the policy for compacting the bottommost level + * @return The BottommostLevelCompaction policy + */ + public BottommostLevelCompaction bottommostLevelCompaction() { + return BottommostLevelCompaction.fromRocksId(bottommostLevelCompaction(nativeHandle_)); + } + + /** + * Sets the policy for compacting the bottommost level + * + * @param bottommostLevelCompaction The policy for compacting the bottommost level + * @return This CompactRangeOptions + */ + public CompactRangeOptions setBottommostLevelCompaction(final BottommostLevelCompaction bottommostLevelCompaction) { + setBottommostLevelCompaction(nativeHandle_, bottommostLevelCompaction.getValue()); + return this; + } + + /** + * If true, compaction will execute immediately even if doing so would cause the DB to + * enter write stall mode. Otherwise, it'll sleep until load is low enough. + * @return true if compaction will execute immediately + */ + public boolean allowWriteStall() { + return allowWriteStall(nativeHandle_); + } + + + /** + * If true, compaction will execute immediately even if doing so would cause the DB to + * enter write stall mode. Otherwise, it'll sleep until load is low enough. + * + * @return This CompactRangeOptions + * @param allowWriteStall true if compaction should execute immediately + */ + public CompactRangeOptions setAllowWriteStall(final boolean allowWriteStall) { + setAllowWriteStall(nativeHandle_, allowWriteStall); + return this; + } + + /** + * If > 0, it will replace the option in the DBOptions for this compaction + * @return number of subcompactions + */ + public int maxSubcompactions() { + return maxSubcompactions(nativeHandle_); + } + + /** + * If > 0, it will replace the option in the DBOptions for this compaction + * + * @param maxSubcompactions number of subcompactions + * @return This CompactRangeOptions + */ + public CompactRangeOptions setMaxSubcompactions(final int maxSubcompactions) { + setMaxSubcompactions(nativeHandle_, maxSubcompactions); + return this; + } + + private native static long newCompactRangeOptions(); + @Override protected final native void disposeInternal(final long handle); + + private native boolean exclusiveManualCompaction(final long handle); + private native void setExclusiveManualCompaction(final long handle, + final boolean exclusive_manual_compaction); + private native boolean changeLevel(final long handle); + private native void setChangeLevel(final long handle, + final boolean changeLevel); + private native int targetLevel(final long handle); + private native void setTargetLevel(final long handle, + final int targetLevel); + private native int targetPathId(final long handle); + private native void setTargetPathId(final long handle, + final int targetPathId); + private native int bottommostLevelCompaction(final long handle); + private native void setBottommostLevelCompaction(final long handle, + final int bottommostLevelCompaction); + private native boolean allowWriteStall(final long handle); + private native void setAllowWriteStall(final long handle, + final boolean allowWriteStall); + private native void setMaxSubcompactions(final long handle, + final int maxSubcompactions); + private native int maxSubcompactions(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java 
b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java new file mode 100644 index 000000000..4e3b8d68b --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobInfo.java @@ -0,0 +1,161 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +public class CompactionJobInfo extends RocksObject { + + public CompactionJobInfo() { + super(newCompactionJobInfo()); + } + + /** + * Private as called from JNI C++ + */ + private CompactionJobInfo(final long nativeHandle) { + super(nativeHandle); + // We do not own the native object! + disOwnNativeHandle(); + } + + /** + * Get the name of the column family where the compaction happened. + * + * @return the name of the column family + */ + public byte[] columnFamilyName() { + return columnFamilyName(nativeHandle_); + } + + /** + * Get the status indicating whether the compaction was successful or not. + * + * @return the status + */ + public Status status() { + return status(nativeHandle_); + } + + /** + * Get the id of the thread that completed this compaction job. + * + * @return the id of the thread + */ + public long threadId() { + return threadId(nativeHandle_); + } + + /** + * Get the job id, which is unique in the same thread. + * + * @return the job id + */ + public int jobId() { + return jobId(nativeHandle_); + } + + /** + * Get the smallest input level of the compaction. + * + * @return the input level + */ + public int baseInputLevel() { + return baseInputLevel(nativeHandle_); + } + + /** + * Get the output level of the compaction. + * + * @return the output level + */ + public int outputLevel() { + return outputLevel(nativeHandle_); + } + + /** + * Get the names of the compaction input files. + * + * @return the names of the input files. + */ + public List<String> inputFiles() { + return Arrays.asList(inputFiles(nativeHandle_)); + } + + /** + * Get the names of the compaction output files. + * + * @return the names of the output files. + */ + public List<String> outputFiles() { + return Arrays.asList(outputFiles(nativeHandle_)); + } + + /** + * Get the table properties for the input and output tables. + * + * The map is keyed by values from {@link #inputFiles()} and + * {@link #outputFiles()}. + * + * @return the table properties + */ + public Map<String, TableProperties> tableProperties() { + return tableProperties(nativeHandle_); + } + + /** + * Get the reason for running the compaction. + * + * @return the reason. + */ + public CompactionReason compactionReason() { + return CompactionReason.fromValue(compactionReason(nativeHandle_)); + } + + // + /** + * Get the compression algorithm used for output files. + * + * @return the compression algorithm + */ + public CompressionType compression() { + return CompressionType.getCompressionType(compression(nativeHandle_)); + } + + /** + * Get detailed information about this compaction. + * + * @return the detailed information, or null if not available.
+ */ + public /* @Nullable */ CompactionJobStats stats() { + final long statsHandle = stats(nativeHandle_); + if (statsHandle == 0) { + return null; + } + + return new CompactionJobStats(statsHandle); + } + + + private static native long newCompactionJobInfo(); + @Override protected native void disposeInternal(final long handle); + + private static native byte[] columnFamilyName(final long handle); + private static native Status status(final long handle); + private static native long threadId(final long handle); + private static native int jobId(final long handle); + private static native int baseInputLevel(final long handle); + private static native int outputLevel(final long handle); + private static native String[] inputFiles(final long handle); + private static native String[] outputFiles(final long handle); + private static native Map<String, TableProperties> tableProperties( + final long handle); + private static native byte compactionReason(final long handle); + private static native byte compression(final long handle); + private static native long stats(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java new file mode 100644 index 000000000..3d53b5565 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionJobStats.java @@ -0,0 +1,295 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class CompactionJobStats extends RocksObject { + + public CompactionJobStats() { + super(newCompactionJobStats()); + } + + /** + * Private as called from JNI C++ + */ + CompactionJobStats(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Reset the stats. + */ + public void reset() { + reset(nativeHandle_); + } + + /** + * Aggregate the CompactionJobStats from another instance with this one. + * + * @param compactionJobStats another instance of stats. + */ + public void add(final CompactionJobStats compactionJobStats) { + add(nativeHandle_, compactionJobStats.nativeHandle_); + } + + /** + * Get the elapsed time in microseconds of this compaction. + * + * @return the elapsed time in microseconds of this compaction. + */ + public long elapsedMicros() { + return elapsedMicros(nativeHandle_); + } + + /** + * Get the number of compaction input records. + * + * @return the number of compaction input records. + */ + public long numInputRecords() { + return numInputRecords(nativeHandle_); + } + + /** + * Get the number of compaction input files. + * + * @return the number of compaction input files. + */ + public long numInputFiles() { + return numInputFiles(nativeHandle_); + } + + /** + * Get the number of compaction input files at the output level. + * + * @return the number of compaction input files at the output level. + */ + public long numInputFilesAtOutputLevel() { + return numInputFilesAtOutputLevel(nativeHandle_); + } + + /** + * Get the number of compaction output records. + * + * @return the number of compaction output records. + */ + public long numOutputRecords() { + return numOutputRecords(nativeHandle_); + } + + /** + * Get the number of compaction output files. + * + * @return the number of compaction output files.
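A sketch of how these objects typically reach client code, via an event listener registered on the DB options; the printf format is illustrative:

    import org.rocksdb.AbstractEventListener;
    import org.rocksdb.CompactionJobInfo;
    import org.rocksdb.CompactionJobStats;
    import org.rocksdb.RocksDB;

    final AbstractEventListener listener = new AbstractEventListener() {
      @Override
      public void onCompactionCompleted(final RocksDB db, final CompactionJobInfo info) {
        final CompactionJobStats stats = info.stats();  // may be null
        if (stats != null) {
          System.out.printf("L%d -> L%d: %d input records%n",
              info.baseInputLevel(), info.outputLevel(), stats.numInputRecords());
        }
      }
    };
    // register with DBOptions#setListeners(...) before opening the database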
+ */ + public long numOutputFiles() { + return numOutputFiles(nativeHandle_); + } + + /** + * Determine if the compaction is a manual compaction. + * + * @return true if the compaction is a manual compaction, false otherwise. + */ + public boolean isManualCompaction() { + return isManualCompaction(nativeHandle_); + } + + /** + * Get the size of the compaction input in bytes. + * + * @return the size of the compaction input in bytes. + */ + public long totalInputBytes() { + return totalInputBytes(nativeHandle_); + } + + /** + * Get the size of the compaction output in bytes. + * + * @return the size of the compaction output in bytes. + */ + public long totalOutputBytes() { + return totalOutputBytes(nativeHandle_); + } + + /** + * Get the number of records being replaced by newer record associated + * with same key. + * + * This could be a new value or a deletion entry for that key so this field + * sums up all updated and deleted keys. + * + * @return the number of records being replaced by newer record associated + * with same key. + */ + public long numRecordsReplaced() { + return numRecordsReplaced(nativeHandle_); + } + + /** + * Get the sum of the uncompressed input keys in bytes. + * + * @return the sum of the uncompressed input keys in bytes. + */ + public long totalInputRawKeyBytes() { + return totalInputRawKeyBytes(nativeHandle_); + } + + /** + * Get the sum of the uncompressed input values in bytes. + * + * @return the sum of the uncompressed input values in bytes. + */ + public long totalInputRawValueBytes() { + return totalInputRawValueBytes(nativeHandle_); + } + + /** + * Get the number of deletion entries before compaction. + * + * Deletion entries can disappear after compaction because they expired. + * + * @return the number of deletion entries before compaction. + */ + public long numInputDeletionRecords() { + return numInputDeletionRecords(nativeHandle_); + } + + /** + * Get the number of deletion records that were found obsolete and discarded + * because it is not possible to delete any more keys with this entry. + * (i.e. all possible deletions resulting from it have been completed) + * + * @return the number of deletion records that were found obsolete and + * discarded. + */ + public long numExpiredDeletionRecords() { + return numExpiredDeletionRecords(nativeHandle_); + } + + /** + * Get the number of corrupt keys (ParseInternalKey returned false when + * applied to the key) encountered and written out. + * + * @return the number of corrupt keys. + */ + public long numCorruptKeys() { + return numCorruptKeys(nativeHandle_); + } + + /** + * Get the Time spent on file's Append() call. + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. + * + * @return the Time spent on file's Append() call. + */ + public long fileWriteNanos() { + return fileWriteNanos(nativeHandle_); + } + + /** + * Get the Time spent on sync file range. + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. + * + * @return the Time spent on sync file range. + */ + public long fileRangeSyncNanos() { + return fileRangeSyncNanos(nativeHandle_); + } + + /** + * Get the Time spent on file fsync. + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. + * + * @return the Time spent on file fsync. + */ + public long fileFsyncNanos() { + return fileFsyncNanos(nativeHandle_); + } + + /** + * Get the Time spent on preparing file write (falocate, etc) + * + * Only populated if {@link ColumnFamilyOptions#reportBgIoStats()} is set. 
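+ *
+ * For example, a sketch of enabling the background IO stats that feed
+ * this field (the options object would then be used when opening the
+ * database):
+ * <pre>{@code
+ * try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+ *   cfOpts.setReportBgIoStats(true);
+ * }
+ * }</pre>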
+ * + * @return the Time spent on preparing file write (falocate, etc). + */ + public long filePrepareWriteNanos() { + return filePrepareWriteNanos(nativeHandle_); + } + + /** + * Get the smallest output key prefix. + * + * @return the smallest output key prefix. + */ + public byte[] smallestOutputKeyPrefix() { + return smallestOutputKeyPrefix(nativeHandle_); + } + + /** + * Get the largest output key prefix. + * + * @return the smallest output key prefix. + */ + public byte[] largestOutputKeyPrefix() { + return largestOutputKeyPrefix(nativeHandle_); + } + + /** + * Get the number of single-deletes which do not meet a put. + * + * @return number of single-deletes which do not meet a put. + */ + @Experimental("Performance optimization for a very specific workload") + public long numSingleDelFallthru() { + return numSingleDelFallthru(nativeHandle_); + } + + /** + * Get the number of single-deletes which meet something other than a put. + * + * @return the number of single-deletes which meet something other than a put. + */ + @Experimental("Performance optimization for a very specific workload") + public long numSingleDelMismatch() { + return numSingleDelMismatch(nativeHandle_); + } + + private static native long newCompactionJobStats(); + @Override protected native void disposeInternal(final long handle); + + + private static native void reset(final long handle); + private static native void add(final long handle, + final long compactionJobStatsHandle); + private static native long elapsedMicros(final long handle); + private static native long numInputRecords(final long handle); + private static native long numInputFiles(final long handle); + private static native long numInputFilesAtOutputLevel(final long handle); + private static native long numOutputRecords(final long handle); + private static native long numOutputFiles(final long handle); + private static native boolean isManualCompaction(final long handle); + private static native long totalInputBytes(final long handle); + private static native long totalOutputBytes(final long handle); + private static native long numRecordsReplaced(final long handle); + private static native long totalInputRawKeyBytes(final long handle); + private static native long totalInputRawValueBytes(final long handle); + private static native long numInputDeletionRecords(final long handle); + private static native long numExpiredDeletionRecords(final long handle); + private static native long numCorruptKeys(final long handle); + private static native long fileWriteNanos(final long handle); + private static native long fileRangeSyncNanos(final long handle); + private static native long fileFsyncNanos(final long handle); + private static native long filePrepareWriteNanos(final long handle); + private static native byte[] smallestOutputKeyPrefix(final long handle); + private static native byte[] largestOutputKeyPrefix(final long handle); + private static native long numSingleDelFallthru(final long handle); + private static native long numSingleDelMismatch(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java new file mode 100644 index 000000000..2c7e391fb --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptions.java @@ -0,0 +1,121 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * CompactionOptions are used in + * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)} + * calls. + */ +public class CompactionOptions extends RocksObject { + + public CompactionOptions() { + super(newCompactionOptions()); + } + + /** + * Get the compaction output compression type. + * + * See {@link #setCompression(CompressionType)}. + * + * @return the compression type. + */ + public CompressionType compression() { + return CompressionType.getCompressionType( + compression(nativeHandle_)); + } + + /** + * Set the compaction output compression type. + * + * Default: snappy + * + * If set to {@link CompressionType#DISABLE_COMPRESSION_OPTION}, + * RocksDB will choose compression type according to the + * {@link ColumnFamilyOptions#compressionType()}, taking into account + * the output level if {@link ColumnFamilyOptions#compressionPerLevel()} + * is specified. + * + * @param compression the compression type to use for compaction output. + * + * @return the instance of the current Options. + */ + public CompactionOptions setCompression(final CompressionType compression) { + setCompression(nativeHandle_, compression.getValue()); + return this; + } + + /** + * Get the compaction output file size limit. + * + * See {@link #setOutputFileSizeLimit(long)}. + * + * @return the file size limit. + */ + public long outputFileSizeLimit() { + return outputFileSizeLimit(nativeHandle_); + } + + /** + * Compaction will create files of size {@link #outputFileSizeLimit()}. + * + * Default: 2^64-1, which means that compaction will create a single file + * + * @param outputFileSizeLimit the size limit + * + * @return the instance of the current Options. + */ + public CompactionOptions setOutputFileSizeLimit( + final long outputFileSizeLimit) { + setOutputFileSizeLimit(nativeHandle_, outputFileSizeLimit); + return this; + } + + /** + * Get the maximum number of threads that will concurrently perform a + * compaction job. + * + * @return the maximum number of threads. + */ + public int maxSubcompactions() { + return maxSubcompactions(nativeHandle_); + } + + /** + * This value represents the maximum number of threads that will + * concurrently perform a compaction job by breaking it into multiple, + * smaller ones that are run simultaneously. + * + * Default: 0 (i.e. no subcompactions) + * + * If > 0, it will replace the option in + * {@link DBOptions#maxSubcompactions()} for this compaction. + * + * @param maxSubcompactions The maximum number of threads that will + * concurrently perform a compaction job + * + * @return the instance of the current Options. 
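+ *
+ * A usage sketch (assumes an open {@code db} and a list {@code files}
+ * of SST file names belonging to the default column family):
+ * <pre>{@code
+ * try (final CompactionOptions copts = new CompactionOptions()
+ *          .setCompression(CompressionType.ZSTD_COMPRESSION)
+ *          .setMaxSubcompactions(4);
+ *      final CompactionJobInfo info = new CompactionJobInfo()) {
+ *   // may throw RocksDBException
+ *   db.compactFiles(copts, db.getDefaultColumnFamily(), files,
+ *       1, -1, info);  // output level 1, default path id
+ * }
+ * }</pre>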
+ */ + public CompactionOptions setMaxSubcompactions(final int maxSubcompactions) { + setMaxSubcompactions(nativeHandle_, maxSubcompactions); + return this; + } + + private static native long newCompactionOptions(); + @Override protected final native void disposeInternal(final long handle); + + private static native byte compression(final long handle); + private static native void setCompression(final long handle, + final byte compressionTypeValue); + private static native long outputFileSizeLimit(final long handle); + private static native void setOutputFileSizeLimit(final long handle, + final long outputFileSizeLimit); + private static native int maxSubcompactions(final long handle); + private static native void setMaxSubcompactions(final long handle, + final int maxSubcompactions); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java new file mode 100644 index 000000000..4c8d6545c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsFIFO.java @@ -0,0 +1,89 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Options for FIFO Compaction + */ +public class CompactionOptionsFIFO extends RocksObject { + + public CompactionOptionsFIFO() { + super(newCompactionOptionsFIFO()); + } + + /** + * Once the total sum of table files reaches this, we will delete the oldest + * table file + * + * Default: 1GB + * + * @param maxTableFilesSize The maximum size of the table files + * + * @return the reference to the current options. + */ + public CompactionOptionsFIFO setMaxTableFilesSize( + final long maxTableFilesSize) { + setMaxTableFilesSize(nativeHandle_, maxTableFilesSize); + return this; + } + + /** + * Once the total sum of table files reaches this, we will delete the oldest + * table file + * + * Default: 1GB + * + * @return max table file size in bytes + */ + public long maxTableFilesSize() { + return maxTableFilesSize(nativeHandle_); + } + + /** + * If true, try to do compaction to compact smaller files into larger ones. + * Minimum files to compact follows options.level0_file_num_compaction_trigger + * and compaction won't trigger if average compact bytes per del file is + * larger than options.write_buffer_size. This is to protect large files + * from being compacted again. + * + * Default: false + * + * @param allowCompaction true to allow intra-L0 compaction + * + * @return the reference to the current options. + */ + public CompactionOptionsFIFO setAllowCompaction( + final boolean allowCompaction) { + setAllowCompaction(nativeHandle_, allowCompaction); + return this; + } + + + /** + * Check if intra-L0 compaction is enabled. + * When enabled, we try to compact smaller files into larger ones. + * + * See {@link #setAllowCompaction(boolean)}. + * + * Default: false + * + * @return true if intra-L0 compaction is enabled, false otherwise. 
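+ *
+ * For example, a sketch of a FIFO setup with a 512 MB cap and intra-L0
+ * compaction enabled ({@code options} being the {@link Options} the
+ * database is opened with):
+ * <pre>{@code
+ * try (final CompactionOptionsFIFO fifoOpts = new CompactionOptionsFIFO()
+ *          .setMaxTableFilesSize(512L * 1024 * 1024)
+ *          .setAllowCompaction(true)) {
+ *   options.setCompactionStyle(CompactionStyle.FIFO)
+ *       .setCompactionOptionsFIFO(fifoOpts);
+ * }
+ * }</pre>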
+ */ + public boolean allowCompaction() { + return allowCompaction(nativeHandle_); + } + + + private native static long newCompactionOptionsFIFO(); + @Override protected final native void disposeInternal(final long handle); + + private native void setMaxTableFilesSize(final long handle, + final long maxTableFilesSize); + private native long maxTableFilesSize(final long handle); + private native void setAllowCompaction(final long handle, + final boolean allowCompaction); + private native boolean allowCompaction(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java new file mode 100644 index 000000000..d2dfa4eef --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionOptionsUniversal.java @@ -0,0 +1,273 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Options for Universal Compaction + */ +public class CompactionOptionsUniversal extends RocksObject { + + public CompactionOptionsUniversal() { + super(newCompactionOptionsUniversal()); + } + + /** + * Percentage flexibility while comparing file size. If the candidate file(s) + * size is 1% smaller than the next file's size, then include next file into + * this candidate set. + * + * Default: 1 + * + * @param sizeRatio The size ratio to use + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setSizeRatio(final int sizeRatio) { + setSizeRatio(nativeHandle_, sizeRatio); + return this; + } + + /** + * Percentage flexibility while comparing file size. If the candidate file(s) + * size is 1% smaller than the next file's size, then include next file into + * this candidate set. + * + * Default: 1 + * + * @return The size ratio in use + */ + public int sizeRatio() { + return sizeRatio(nativeHandle_); + } + + /** + * The minimum number of files in a single compaction run. + * + * Default: 2 + * + * @param minMergeWidth minimum number of files in a single compaction run + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setMinMergeWidth(final int minMergeWidth) { + setMinMergeWidth(nativeHandle_, minMergeWidth); + return this; + } + + /** + * The minimum number of files in a single compaction run. + * + * Default: 2 + * + * @return minimum number of files in a single compaction run + */ + public int minMergeWidth() { + return minMergeWidth(nativeHandle_); + } + + /** + * The maximum number of files in a single compaction run. + * + * Default: {@link Long#MAX_VALUE} + * + * @param maxMergeWidth maximum number of files in a single compaction run + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setMaxMergeWidth(final int maxMergeWidth) { + setMaxMergeWidth(nativeHandle_, maxMergeWidth); + return this; + } + + /** + * The maximum number of files in a single compaction run. + * + * Default: {@link Long#MAX_VALUE} + * + * @return maximum number of files in a single compaction run + */ + public int maxMergeWidth() { + return maxMergeWidth(nativeHandle_); + } + + /** + * The size amplification is defined as the amount (in percentage) of + * additional storage needed to store a single byte of data in the database. 
+ * For example, a size amplification of 2% means that a database that + * contains 100 bytes of user-data may occupy upto 102 bytes of + * physical storage. By this definition, a fully compacted database has + * a size amplification of 0%. Rocksdb uses the following heuristic + * to calculate size amplification: it assumes that all files excluding + * the earliest file contribute to the size amplification. + * + * Default: 200, which means that a 100 byte database could require upto + * 300 bytes of storage. + * + * @param maxSizeAmplificationPercent the amount of additional storage needed + * (as a percentage) to store a single byte in the database + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setMaxSizeAmplificationPercent( + final int maxSizeAmplificationPercent) { + setMaxSizeAmplificationPercent(nativeHandle_, maxSizeAmplificationPercent); + return this; + } + + /** + * The size amplification is defined as the amount (in percentage) of + * additional storage needed to store a single byte of data in the database. + * For example, a size amplification of 2% means that a database that + * contains 100 bytes of user-data may occupy upto 102 bytes of + * physical storage. By this definition, a fully compacted database has + * a size amplification of 0%. Rocksdb uses the following heuristic + * to calculate size amplification: it assumes that all files excluding + * the earliest file contribute to the size amplification. + * + * Default: 200, which means that a 100 byte database could require upto + * 300 bytes of storage. + * + * @return the amount of additional storage needed (as a percentage) to store + * a single byte in the database + */ + public int maxSizeAmplificationPercent() { + return maxSizeAmplificationPercent(nativeHandle_); + } + + /** + * If this option is set to be -1 (the default value), all the output files + * will follow compression type specified. + * + * If this option is not negative, we will try to make sure compressed + * size is just above this value. In normal cases, at least this percentage + * of data will be compressed. + * + * When we are compacting to a new file, here is the criteria whether + * it needs to be compressed: assuming here are the list of files sorted + * by generation time: + * A1...An B1...Bm C1...Ct + * where A1 is the newest and Ct is the oldest, and we are going to compact + * B1...Bm, we calculate the total size of all the files as total_size, as + * well as the total size of C1...Ct as total_C, the compaction output file + * will be compressed iff + * total_C / total_size < this percentage + * + * Default: -1 + * + * @param compressionSizePercent percentage of size for compression + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setCompressionSizePercent( + final int compressionSizePercent) { + setCompressionSizePercent(nativeHandle_, compressionSizePercent); + return this; + } + + /** + * If this option is set to be -1 (the default value), all the output files + * will follow compression type specified. + * + * If this option is not negative, we will try to make sure compressed + * size is just above this value. In normal cases, at least this percentage + * of data will be compressed. 
+ * + * When we are compacting to a new file, here is the criteria whether + * it needs to be compressed: assuming here are the list of files sorted + * by generation time: + * A1...An B1...Bm C1...Ct + * where A1 is the newest and Ct is the oldest, and we are going to compact + * B1...Bm, we calculate the total size of all the files as total_size, as + * well as the total size of C1...Ct as total_C, the compaction output file + * will be compressed iff + * total_C / total_size < this percentage + * + * Default: -1 + * + * @return percentage of size for compression + */ + public int compressionSizePercent() { + return compressionSizePercent(nativeHandle_); + } + + /** + * The algorithm used to stop picking files into a single compaction run + * + * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} + * + * @param compactionStopStyle The compaction algorithm + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setStopStyle( + final CompactionStopStyle compactionStopStyle) { + setStopStyle(nativeHandle_, compactionStopStyle.getValue()); + return this; + } + + /** + * The algorithm used to stop picking files into a single compaction run + * + * Default: {@link CompactionStopStyle#CompactionStopStyleTotalSize} + * + * @return The compaction algorithm + */ + public CompactionStopStyle stopStyle() { + return CompactionStopStyle.getCompactionStopStyle(stopStyle(nativeHandle_)); + } + + /** + * Option to optimize the universal multi level compaction by enabling + * trivial move for non overlapping files. + * + * Default: false + * + * @param allowTrivialMove true if trivial move is allowed + * + * @return the reference to the current options. + */ + public CompactionOptionsUniversal setAllowTrivialMove( + final boolean allowTrivialMove) { + setAllowTrivialMove(nativeHandle_, allowTrivialMove); + return this; + } + + /** + * Option to optimize the universal multi level compaction by enabling + * trivial move for non overlapping files. 
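+ *
+ * A configuration sketch pulling the universal-compaction knobs together
+ * ({@code options} is assumed to be the {@link Options} used to open the
+ * database):
+ * <pre>{@code
+ * try (final CompactionOptionsUniversal uniOpts =
+ *          new CompactionOptionsUniversal()
+ *              .setSizeRatio(1)
+ *              .setMaxSizeAmplificationPercent(200)
+ *              .setAllowTrivialMove(true)) {
+ *   options.setCompactionStyle(CompactionStyle.UNIVERSAL)
+ *       .setCompactionOptionsUniversal(uniOpts);
+ * }
+ * }</pre>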
+ * + * Default: false + * + * @return true if trivial move is allowed + */ + public boolean allowTrivialMove() { + return allowTrivialMove(nativeHandle_); + } + + private native static long newCompactionOptionsUniversal(); + @Override protected final native void disposeInternal(final long handle); + + private native void setSizeRatio(final long handle, final int sizeRatio); + private native int sizeRatio(final long handle); + private native void setMinMergeWidth( + final long handle, final int minMergeWidth); + private native int minMergeWidth(final long handle); + private native void setMaxMergeWidth( + final long handle, final int maxMergeWidth); + private native int maxMergeWidth(final long handle); + private native void setMaxSizeAmplificationPercent( + final long handle, final int maxSizeAmplificationPercent); + private native int maxSizeAmplificationPercent(final long handle); + private native void setCompressionSizePercent( + final long handle, final int compressionSizePercent); + private native int compressionSizePercent(final long handle); + private native void setStopStyle( + final long handle, final byte stopStyle); + private native byte stopStyle(final long handle); + private native void setAllowTrivialMove( + final long handle, final boolean allowTrivialMove); + private native boolean allowTrivialMove(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java new file mode 100644 index 000000000..eda05942e --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionPriority.java @@ -0,0 +1,81 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Compaction Priorities + */ +public enum CompactionPriority { + + /** + * Slightly Prioritize larger files by size compensated by #deletes + */ + ByCompensatedSize((byte)0x0), + + /** + * First compact files whose data's latest update time is oldest. + * Try this if you only update some hot keys in small ranges. + */ + OldestLargestSeqFirst((byte)0x1), + + /** + * First compact files whose range hasn't been compacted to the next level + * for the longest. If your updates are random across the key space, + * write amplification is slightly better with this option. + */ + OldestSmallestSeqFirst((byte)0x2), + + /** + * First compact files whose ratio between overlapping size in next level + * and its size is the smallest. It in many cases can optimize write + * amplification. + */ + MinOverlappingRatio((byte)0x3), + + /** + * Keeps a cursor(s) of the successor of the file (key range) was/were + * compacted before, and always picks the next files (key range) in that + * level. The file picking process will cycle through all the files in a + * round-robin manner. + */ + RoundRobin((byte)0x4); + + + private final byte value; + + CompactionPriority(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get CompactionPriority by byte value. + * + * @param value byte representation of CompactionPriority. + * + * @return {@link org.rocksdb.CompactionPriority} instance or null. 
+ * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static CompactionPriority getCompactionPriority(final byte value) { + for (final CompactionPriority compactionPriority : + CompactionPriority.values()) { + if (compactionPriority.getValue() == value){ + return compactionPriority; + } + } + throw new IllegalArgumentException( + "Illegal value provided for CompactionPriority."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java new file mode 100644 index 000000000..24e234450 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionReason.java @@ -0,0 +1,125 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum CompactionReason { + kUnknown((byte)0x0), + + /** + * [Level] number of L0 files > level0_file_num_compaction_trigger + */ + kLevelL0FilesNum((byte)0x1), + + /** + * [Level] total size of level > MaxBytesForLevel() + */ + kLevelMaxLevelSize((byte)0x2), + + /** + * [Universal] Compacting for size amplification + */ + kUniversalSizeAmplification((byte)0x3), + + /** + * [Universal] Compacting for size ratio + */ + kUniversalSizeRatio((byte)0x4), + + /** + * [Universal] number of sorted runs > level0_file_num_compaction_trigger + */ + kUniversalSortedRunNum((byte)0x5), + + /** + * [FIFO] total size > max_table_files_size + */ + kFIFOMaxSize((byte)0x6), + + /** + * [FIFO] reduce number of files. + */ + kFIFOReduceNumFiles((byte)0x7), + + /** + * [FIFO] files with creation time < (current_time - interval) + */ + kFIFOTtl((byte)0x8), + + /** + * Manual compaction + */ + kManualCompaction((byte)0x9), + + /** + * DB::SuggestCompactRange() marked files for compaction + */ + kFilesMarkedForCompaction((byte)0x10), + + /** + * [Level] Automatic compaction within bottommost level to cleanup duplicate + * versions of same user key, usually due to a released snapshot. + */ + kBottommostFiles((byte)0x0A), + + /** + * Compaction based on TTL + */ + kTtl((byte)0x0B), + + /** + * According to the comments in flush_job.cc, RocksDB treats flush as + * a level 0 compaction in internal stats. + */ + kFlush((byte)0x0C), + + /** + * Compaction caused by external sst file ingestion + */ + kExternalSstIngestion((byte) 0x0D), + + /** + * Compaction due to SST file being too old + */ + kPeriodicCompaction((byte) 0x0E), + + /** + * Compaction in order to move files to temperature + */ + kChangeTemperature((byte) 0x0F); + + private final byte value; + + CompactionReason(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value + */ + byte getValue() { + return value; + } + + /** + * Get the CompactionReason from the internal representation value. + * + * @return the compaction reason. + * + * @throws IllegalArgumentException if the value is unknown. 
+ */ + static CompactionReason fromValue(final byte value) { + for (final CompactionReason compactionReason : CompactionReason.values()) { + if(compactionReason.value == value) { + return compactionReason; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for CompactionReason: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java new file mode 100644 index 000000000..f6e63209c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStopStyle.java @@ -0,0 +1,55 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +/** + * Algorithm used to make a compaction request stop picking new files + * into a single compaction run + */ +public enum CompactionStopStyle { + + /** + * Pick files of similar size + */ + CompactionStopStyleSimilarSize((byte)0x0), + + /** + * Total size of picked files > next file + */ + CompactionStopStyleTotalSize((byte)0x1); + + + private final byte value; + + CompactionStopStyle(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get CompactionStopStyle by byte value. + * + * @param value byte representation of CompactionStopStyle. + * + * @return {@link org.rocksdb.CompactionStopStyle} instance or null. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static CompactionStopStyle getCompactionStopStyle(final byte value) { + for (final CompactionStopStyle compactionStopStyle : + CompactionStopStyle.values()) { + if (compactionStopStyle.getValue() == value){ + return compactionStopStyle; + } + } + throw new IllegalArgumentException( + "Illegal value provided for CompactionStopStyle."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java new file mode 100644 index 000000000..b24bbf850 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -0,0 +1,80 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * Enum CompactionStyle + * + * RocksDB supports different styles of compaction. Available + * compaction styles can be chosen using this enumeration. + * + *
<ol>
+ *   <li>LEVEL - Level based Compaction style</li>
+ *   <li>UNIVERSAL - Universal Compaction Style is a
+ * compaction style, targeting the use cases requiring lower write
+ * amplification, trading off read amplification and space
+ * amplification.</li>
+ *   <li>FIFO - FIFO compaction style is the simplest
+ * compaction strategy. It is suited for keeping event log data with
+ * very low overhead (query log for example). It periodically deletes
+ * the old data, so it's basically a TTL compaction style.</li>
+ *   <li>NONE - Disable background compaction.
+ * Compaction jobs are submitted via
+ * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, CompactionJobInfo)}.</li>
+ * </ol>
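+ *
+ * For example, a sketch of selecting Universal compaction when building
+ * the options a database is opened with:
+ * <pre>{@code
+ * try (final Options options = new Options()
+ *          .setCreateIfMissing(true)
+ *          .setCompactionStyle(CompactionStyle.UNIVERSAL)) {
+ *   // pass options to RocksDB.open(...)
+ * }
+ * }</pre>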
+ * + * @see + * Universal Compaction + * @see + * FIFO Compaction + */ +public enum CompactionStyle { + LEVEL((byte) 0x0), + UNIVERSAL((byte) 0x1), + FIFO((byte) 0x2), + NONE((byte) 0x3); + + private final byte value; + + CompactionStyle(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + //TODO(AR) should be made package-private + public byte getValue() { + return value; + } + + /** + * Get the Compaction style from the internal representation value. + * + * @param value the internal representation value. + * + * @return the Compaction style + * + * @throws IllegalArgumentException if the value does not match a + * CompactionStyle + */ + static CompactionStyle fromValue(final byte value) + throws IllegalArgumentException { + for (final CompactionStyle compactionStyle : CompactionStyle.values()) { + if (compactionStyle.value == value) { + return compactionStyle; + } + } + throw new IllegalArgumentException("Unknown value for CompactionStyle: " + + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java new file mode 100644 index 000000000..8c3162858 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java @@ -0,0 +1,133 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * This class controls the behaviour + * of Java implementations of + * AbstractComparator + * + * Note that dispose() must be called before a ComparatorOptions + * instance becomes out-of-scope to release the allocated memory in C++. + */ +public class ComparatorOptions extends RocksObject { + public ComparatorOptions() { + super(newComparatorOptions()); + } + + /** + * Get the synchronisation type used to guard the reused buffers. + * Only used if {@link #maxReusedBufferSize()} > 0 + * Default: {@link ReusedSynchronisationType#ADAPTIVE_MUTEX} + * + * @return the synchronisation type + */ + public ReusedSynchronisationType reusedSynchronisationType() { + assert(isOwningHandle()); + return ReusedSynchronisationType.getReusedSynchronisationType( + reusedSynchronisationType(nativeHandle_)); + } + + /** + * Set the synchronisation type used to guard the reused buffers. + * Only used if {@link #maxReusedBufferSize()} > 0 + * Default: {@link ReusedSynchronisationType#ADAPTIVE_MUTEX} + * + * @param reusedSynchronisationType the synchronisation type + * + * @return the reference to the current comparator options. + */ + public ComparatorOptions setReusedSynchronisationType( + final ReusedSynchronisationType reusedSynchronisationType) { + assert (isOwningHandle()); + setReusedSynchronisationType(nativeHandle_, + reusedSynchronisationType.getValue()); + return this; + } + + /** + * Indicates if a direct byte buffer (i.e. outside of the normal + * garbage-collected heap) is used, as opposed to a non-direct byte buffer + * which is a wrapper around an on-heap byte[]. + * + * Default: true + * + * @return true if a direct byte buffer will be used, false otherwise + */ + public boolean useDirectBuffer() { + assert(isOwningHandle()); + return useDirectBuffer(nativeHandle_); + } + + /** + * Controls whether a direct byte buffer (i.e. 
outside of the normal + * garbage-collected heap) is used, as opposed to a non-direct byte buffer + * which is a wrapper around an on-heap byte[]. + * + * Default: true + * + * @param useDirectBuffer true if a direct byte buffer should be used, + * false otherwise + * @return the reference to the current comparator options. + */ + public ComparatorOptions setUseDirectBuffer(final boolean useDirectBuffer) { + assert(isOwningHandle()); + setUseDirectBuffer(nativeHandle_, useDirectBuffer); + return this; + } + + /** + * Maximum size of a buffer (in bytes) that will be reused. + * Comparators will use 5 of these buffers, + * so the retained memory size will be 5 * max_reused_buffer_size. + * When a buffer is needed for transferring data to a callback, + * if it requires less than {@code maxReuseBufferSize}, then an + * existing buffer will be reused, else a new buffer will be + * allocated just for that callback. + * + * Default: 64 bytes + * + * @return the maximum size of a buffer which is reused, + * or 0 if reuse is disabled + */ + public int maxReusedBufferSize() { + assert(isOwningHandle()); + return maxReusedBufferSize(nativeHandle_); + } + + /** + * Sets the maximum size of a buffer (in bytes) that will be reused. + * Comparators will use 5 of these buffers, + * so the retained memory size will be 5 * max_reused_buffer_size. + * When a buffer is needed for transferring data to a callback, + * if it requires less than {@code maxReuseBufferSize}, then an + * existing buffer will be reused, else a new buffer will be + * allocated just for that callback. + * + * Default: 64 bytes + * + * @param maxReusedBufferSize the maximum size for a buffer to reuse, or 0 to + * disable reuse + * + * @return the maximum size of a buffer which is reused + */ + public ComparatorOptions setMaxReusedBufferSize(final int maxReusedBufferSize) { + assert(isOwningHandle()); + setMaxReusedBufferSize(nativeHandle_, maxReusedBufferSize); + return this; + } + + private native static long newComparatorOptions(); + private native byte reusedSynchronisationType(final long handle); + private native void setReusedSynchronisationType(final long handle, + final byte reusedSynchronisationType); + private native boolean useDirectBuffer(final long handle); + private native void setUseDirectBuffer(final long handle, + final boolean useDirectBuffer); + private native int maxReusedBufferSize(final long handle); + private native void setMaxReusedBufferSize(final long handle, + final int maxReuseBufferSize); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java new file mode 100644 index 000000000..199980b6e --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ComparatorType.java @@ -0,0 +1,48 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +enum ComparatorType { + JAVA_COMPARATOR((byte)0x0), + JAVA_NATIVE_COMPARATOR_WRAPPER((byte)0x1); + + private final byte value; + + ComparatorType(final byte value) { + this.value = value; + } + + /** + *
<p>Returns the byte value of the enumerations value.</p>
+ *
+ * @return byte representation
+ */
+ byte getValue() {
+ return value;
+ }
+
+ /**
+ * <p>Get the ComparatorType enumeration value by
+ * passing the byte identifier to this method.</p>
+ * + * @param byteIdentifier of ComparatorType. + * + * @return ComparatorType instance. + * + * @throws IllegalArgumentException if the comparator type for the byteIdentifier + * cannot be found + */ + static ComparatorType getComparatorType(final byte byteIdentifier) { + for (final ComparatorType comparatorType : ComparatorType.values()) { + if (comparatorType.getValue() == byteIdentifier) { + return comparatorType; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for ComparatorType."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java new file mode 100644 index 000000000..a9072bbb9 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionOptions.java @@ -0,0 +1,151 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Options for Compression + */ +public class CompressionOptions extends RocksObject { + + public CompressionOptions() { + super(newCompressionOptions()); + } + + public CompressionOptions setWindowBits(final int windowBits) { + setWindowBits(nativeHandle_, windowBits); + return this; + } + + public int windowBits() { + return windowBits(nativeHandle_); + } + + public CompressionOptions setLevel(final int level) { + setLevel(nativeHandle_, level); + return this; + } + + public int level() { + return level(nativeHandle_); + } + + public CompressionOptions setStrategy(final int strategy) { + setStrategy(nativeHandle_, strategy); + return this; + } + + public int strategy() { + return strategy(nativeHandle_); + } + + /** + * Maximum size of dictionary used to prime the compression library. Currently + * this dictionary will be constructed by sampling the first output file in a + * subcompaction when the target level is bottommost. This dictionary will be + * loaded into the compression library before compressing/uncompressing each + * data block of subsequent files in the subcompaction. Effectively, this + * improves compression ratios when there are repetitions across data blocks. + * + * A value of 0 indicates the feature is disabled. + * + * Default: 0. + * + * @param maxDictBytes Maximum bytes to use for the dictionary + * + * @return the reference to the current options + */ + public CompressionOptions setMaxDictBytes(final int maxDictBytes) { + setMaxDictBytes(nativeHandle_, maxDictBytes); + return this; + } + + /** + * Maximum size of dictionary used to prime the compression library. + * + * @return The maximum bytes to use for the dictionary + */ + public int maxDictBytes() { + return maxDictBytes(nativeHandle_); + } + + /** + * Maximum size of training data passed to zstd's dictionary trainer. Using + * zstd's dictionary trainer can achieve even better compression ratio + * improvements than using {@link #setMaxDictBytes(int)} alone. + * + * The training data will be used to generate a dictionary + * of {@link #maxDictBytes()}. + * + * Default: 0. + * + * @param zstdMaxTrainBytes Maximum bytes to use for training ZStd. 
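+ *
+ * A sketch of a dictionary-compression setup ({@code cfOpts} is assumed
+ * to be the {@link ColumnFamilyOptions} the column family is opened
+ * with):
+ * <pre>{@code
+ * try (final CompressionOptions copts = new CompressionOptions()
+ *          .setMaxDictBytes(16 * 1024)
+ *          .setZStdMaxTrainBytes(100 * 16 * 1024)) {
+ *   cfOpts.setCompressionType(CompressionType.ZSTD_COMPRESSION)
+ *       .setCompressionOptions(copts);
+ * }
+ * }</pre>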
+ * + * @return the reference to the current options + */ + public CompressionOptions setZStdMaxTrainBytes(final int zstdMaxTrainBytes) { + setZstdMaxTrainBytes(nativeHandle_, zstdMaxTrainBytes); + return this; + } + + /** + * Maximum size of training data passed to zstd's dictionary trainer. + * + * @return Maximum bytes to use for training ZStd + */ + public int zstdMaxTrainBytes() { + return zstdMaxTrainBytes(nativeHandle_); + } + + /** + * When the compression options are set by the user, it will be set to "true". + * For bottommost_compression_opts, to enable it, user must set enabled=true. + * Otherwise, bottommost compression will use compression_opts as default + * compression options. + * + * For compression_opts, if compression_opts.enabled=false, it is still + * used as compression options for compression process. + * + * Default: false. + * + * @param enabled true to use these compression options + * for the bottommost_compression_opts, false otherwise + * + * @return the reference to the current options + */ + public CompressionOptions setEnabled(final boolean enabled) { + setEnabled(nativeHandle_, enabled); + return this; + } + + /** + * Determine whether these compression options + * are used for the bottommost_compression_opts. + * + * @return true if these compression options are used + * for the bottommost_compression_opts, false otherwise + */ + public boolean enabled() { + return enabled(nativeHandle_); + } + + + private native static long newCompressionOptions(); + @Override protected final native void disposeInternal(final long handle); + + private native void setWindowBits(final long handle, final int windowBits); + private native int windowBits(final long handle); + private native void setLevel(final long handle, final int level); + private native int level(final long handle); + private native void setStrategy(final long handle, final int strategy); + private native int strategy(final long handle); + private native void setMaxDictBytes(final long handle, final int maxDictBytes); + private native int maxDictBytes(final long handle); + private native void setZstdMaxTrainBytes(final long handle, + final int zstdMaxTrainBytes); + private native int zstdMaxTrainBytes(final long handle); + private native void setEnabled(final long handle, final boolean enabled); + private native boolean enabled(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java new file mode 100644 index 000000000..d1d73d51a --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java @@ -0,0 +1,121 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Enum CompressionType + * + *
<p>DB contents are stored in a set of blocks, each of which holds a
+ * sequence of key,value pairs. Each block may be compressed before
+ * being stored in a file. The following enum describes which
+ * compression method (if any) is used to compress a block.</p>
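+ *
+ * For example, a sketch of choosing a block compression codec via
+ * {@link Options}:
+ * <pre>{@code
+ * try (final Options options = new Options()
+ *          .setCompressionType(CompressionType.LZ4_COMPRESSION)) {
+ *   // pass options to RocksDB.open(...)
+ * }
+ * }</pre>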
+ */ +public enum CompressionType { + NO_COMPRESSION((byte) 0x0, null, "kNoCompression"), + SNAPPY_COMPRESSION((byte) 0x1, "snappy", "kSnappyCompression"), + ZLIB_COMPRESSION((byte) 0x2, "z", "kZlibCompression"), + BZLIB2_COMPRESSION((byte) 0x3, "bzip2", "kBZip2Compression"), + LZ4_COMPRESSION((byte) 0x4, "lz4", "kLZ4Compression"), + LZ4HC_COMPRESSION((byte) 0x5, "lz4hc", "kLZ4HCCompression"), + XPRESS_COMPRESSION((byte) 0x6, "xpress", "kXpressCompression"), + ZSTD_COMPRESSION((byte) 0x7, "zstd", "kZSTD"), + DISABLE_COMPRESSION_OPTION((byte) 0x7F, null, "kDisableCompressionOption"); + + /** + *
<p>Get the CompressionType enumeration value by
+ * passing the library name to this method.</p>
+ *
+ * <p>If library cannot be found the enumeration
+ * value {@code NO_COMPRESSION} will be returned.</p>
+ * + * @param libraryName compression library name. + * + * @return CompressionType instance. + */ + public static CompressionType getCompressionType(String libraryName) { + if (libraryName != null) { + for (CompressionType compressionType : CompressionType.values()) { + if (compressionType.getLibraryName() != null && + compressionType.getLibraryName().equals(libraryName)) { + return compressionType; + } + } + } + return CompressionType.NO_COMPRESSION; + } + + /** + *
<p>Get the CompressionType enumeration value by
+ * passing the byte identifier to this method.</p>
+ * + * @param byteIdentifier of CompressionType. + * + * @return CompressionType instance. + * + * @throws IllegalArgumentException If CompressionType cannot be found for the + * provided byteIdentifier + */ + public static CompressionType getCompressionType(byte byteIdentifier) { + for (final CompressionType compressionType : CompressionType.values()) { + if (compressionType.getValue() == byteIdentifier) { + return compressionType; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for CompressionType."); + } + + /** + *
<p>Get a CompressionType value based on the string key in the C++ options output.
+ * This gets used in support of getting options into Java from an options string,
+ * which is generated at the C++ level.</p>
+ * + * @param internalName the internal (C++) name by which the option is known. + * + * @return CompressionType instance (optional) + */ + static CompressionType getFromInternal(final String internalName) { + for (final CompressionType compressionType : CompressionType.values()) { + if (compressionType.internalName_.equals(internalName)) { + return compressionType; + } + } + + throw new IllegalArgumentException( + "Illegal internalName '" + internalName + " ' provided for CompressionType."); + } + + /** + *
<p>Returns the byte value of the enumerations value.</p>
+ * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + /** + *
<p>Returns the library name of the compression type
+ * identified by the enumeration value.</p>
+ * + * @return library name + */ + public String getLibraryName() { + return libraryName_; + } + + CompressionType(final byte value, final String libraryName, final String internalName) { + value_ = value; + libraryName_ = libraryName; + internalName_ = internalName; + } + + private final byte value_; + private final String libraryName_; + private final String internalName_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java b/src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java new file mode 100644 index 000000000..b4e34303b --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiter.java @@ -0,0 +1,44 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public abstract class ConcurrentTaskLimiter extends RocksObject { + protected ConcurrentTaskLimiter(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Returns a name that identifies this concurrent task limiter. + * + * @return Concurrent task limiter name. + */ + public abstract String name(); + + /** + * Set max concurrent tasks.
+ * limit = 0 means no new task allowed.
+ * limit < 0 means no limitation. + * + * @param maxOutstandingTask max concurrent tasks. + * @return the reference to the current instance of ConcurrentTaskLimiter. + */ + public abstract ConcurrentTaskLimiter setMaxOutstandingTask(final int maxOutstandingTask); + + /** + * Reset to unlimited max concurrent task. + * + * @return the reference to the current instance of ConcurrentTaskLimiter. + */ + public abstract ConcurrentTaskLimiter resetMaxOutstandingTask(); + + /** + * Returns current outstanding task count. + * + * @return current outstanding task count. + */ + public abstract int outstandingTask(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java b/src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java new file mode 100644 index 000000000..d28b9060a --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ConcurrentTaskLimiterImpl.java @@ -0,0 +1,48 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class ConcurrentTaskLimiterImpl extends ConcurrentTaskLimiter { + public ConcurrentTaskLimiterImpl(final String name, final int maxOutstandingTask) { + super(newConcurrentTaskLimiterImpl0(name, maxOutstandingTask)); + } + + @Override + public String name() { + assert (isOwningHandle()); + return name(nativeHandle_); + } + + @Override + public ConcurrentTaskLimiter setMaxOutstandingTask(final int maxOutstandingTask) { + assert (isOwningHandle()); + setMaxOutstandingTask(nativeHandle_, maxOutstandingTask); + return this; + } + + @Override + public ConcurrentTaskLimiter resetMaxOutstandingTask() { + assert (isOwningHandle()); + resetMaxOutstandingTask(nativeHandle_); + return this; + } + + @Override + public int outstandingTask() { + assert (isOwningHandle()); + return outstandingTask(nativeHandle_); + } + + private static native long newConcurrentTaskLimiterImpl0( + final String name, final int maxOutstandingTask); + private static native String name(final long handle); + private static native void setMaxOutstandingTask(final long handle, final int limit); + private static native void resetMaxOutstandingTask(final long handle); + private static native int outstandingTask(final long handle); + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ConfigOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/ConfigOptions.java new file mode 100644 index 000000000..4d93f0c99 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ConfigOptions.java @@ -0,0 +1,53 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +public class ConfigOptions extends RocksObject { + static { + RocksDB.loadLibrary(); + } + + /** + * Construct with default Options + */ + public ConfigOptions() { + super(newConfigOptions()); + } + + public ConfigOptions setDelimiter(final String delimiter) { + setDelimiter(nativeHandle_, delimiter); + return this; + } + public ConfigOptions setIgnoreUnknownOptions(final boolean ignore) { + setIgnoreUnknownOptions(nativeHandle_, ignore); + return this; + } + + public ConfigOptions setEnv(final Env env) { + setEnv(nativeHandle_, env.nativeHandle_); + return this; + } + + public ConfigOptions setInputStringsEscaped(final boolean escaped) { + setInputStringsEscaped(nativeHandle_, escaped); + return this; + } + + public ConfigOptions setSanityLevel(final SanityLevel level) { + setSanityLevel(nativeHandle_, level.getValue()); + return this; + } + + @Override protected final native void disposeInternal(final long handle); + + private native static long newConfigOptions(); + private native static void setEnv(final long handle, final long envHandle); + private native static void setDelimiter(final long handle, final String delimiter); + private native static void setIgnoreUnknownOptions(final long handle, final boolean ignore); + private native static void setInputStringsEscaped(final long handle, final boolean escaped); + private native static void setSanityLevel(final long handle, final byte level); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java new file mode 100644 index 000000000..543222262 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java @@ -0,0 +1,1495 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.file.Paths; +import java.util.*; + +/** + * DBOptions to control the behavior of a database. It will be used + * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). + * + * As a descendent of {@link AbstractNativeReference}, this class is {@link AutoCloseable} + * and will be automatically released if opened in the preamble of a try with resources block. + */ +public class DBOptions extends RocksObject + implements DBOptionsInterface, + MutableDBOptionsInterface { + static { + RocksDB.loadLibrary(); + } + + /** + * Construct DBOptions. + * + * This constructor will create (by allocating a block of memory) + * an {@code rocksdb::DBOptions} in the c++ side. + */ + public DBOptions() { + super(newDBOptions()); + numShardBits_ = DEFAULT_NUM_SHARD_BITS; + } + + /** + * Copy constructor for DBOptions. + * + * NOTE: This does a shallow copy, which means env, rate_limiter, sst_file_manager, + * info_log and other pointers will be cloned! + * + * @param other The DBOptions to copy. + */ + public DBOptions(DBOptions other) { + super(copyDBOptions(other.nativeHandle_)); + this.env_ = other.env_; + this.numShardBits_ = other.numShardBits_; + this.rateLimiter_ = other.rateLimiter_; + this.rowCache_ = other.rowCache_; + this.walFilter_ = other.walFilter_; + this.writeBufferManager_ = other.writeBufferManager_; + } + + /** + * Constructor from Options + * + * @param options The options. 
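+ *
+ * For example (a sketch):
+ * <pre>{@code
+ * try (final Options options = new Options().setIncreaseParallelism(4);
+ *      final DBOptions dbOptions = new DBOptions(options)) {
+ *   // dbOptions now carries the DB-level settings from options
+ * }
+ * }</pre>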
+ */ + public DBOptions(final Options options) { + super(newDBOptionsFromOptions(options.nativeHandle_)); + } + + /** + *
Method to get a options instance by using pre-configured + * property values. If one or many values are undefined in + * the context of RocksDB the method will return a null + * value.

+ * + *

Note: Property keys can be derived from + * getter methods within the options class. Example: the method + * {@code allowMmapReads()} has a property key: + * {@code allow_mmap_reads}.

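+   *
+   * <p>For illustration only, a minimal sketch of loading options this way;
+   * the property key below simply follows the getter-to-key convention
+   * described above:</p>
+   * <pre>{@code
+   * Properties props = new Properties();
+   * props.setProperty("allow_mmap_reads", "true");
+   * try (ConfigOptions cfgOpts = new ConfigOptions();
+   *      DBOptions dbOpts = DBOptions.getDBOptionsFromProps(cfgOpts, props)) {
+   *   // dbOpts is null if any of the given properties could not be interpreted
+   * }
+   * }</pre>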
+ * + * @param cfgOpts The ConfigOptions to control how the string is processed. + * @param properties {@link java.util.Properties} instance. + * + * @return {@link org.rocksdb.DBOptions instance} + * or null. + * + * @throws java.lang.IllegalArgumentException if null or empty + * {@link java.util.Properties} instance is passed to the method call. + */ + public static DBOptions getDBOptionsFromProps( + final ConfigOptions cfgOpts, final Properties properties) { + DBOptions dbOptions = null; + final String optionsString = Options.getOptionStringFromProps(properties); + final long handle = getDBOptionsFromProps(cfgOpts.nativeHandle_, optionsString); + if (handle != 0) { + dbOptions = new DBOptions(handle); + } + return dbOptions; + } + + /** + *
+   * <p>Method to get an options instance by using pre-configured
+   * property values. If one or many values are undefined in
+   * the context of RocksDB the method will return a null
+   * value.</p>
+   *
+   * <p>Note: Property keys can be derived from
+   * getter methods within the options class. Example: the method
+   * {@code allowMmapReads()} has a property key:
+   * {@code allow_mmap_reads}.</p>
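+   *
+   * <p>Illustrative sketch of the null-return contract described above
+   * (the property key shown is just an example):</p>
+   * <pre>{@code
+   * Properties props = new Properties();
+   * props.setProperty("max_background_jobs", "4");
+   * final DBOptions dbOpts = DBOptions.getDBOptionsFromProps(props);
+   * if (dbOpts == null) {
+   *   // at least one property value was not understood by RocksDB
+   * }
+   * }</pre>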
+ * + * @param properties {@link java.util.Properties} instance. + * + * @return {@link org.rocksdb.DBOptions instance} + * or null. + * + * @throws java.lang.IllegalArgumentException if null or empty + * {@link java.util.Properties} instance is passed to the method call. + */ + public static DBOptions getDBOptionsFromProps(final Properties properties) { + DBOptions dbOptions = null; + final String optionsString = Options.getOptionStringFromProps(properties); + final long handle = getDBOptionsFromProps(optionsString); + if (handle != 0) { + dbOptions = new DBOptions(handle); + } + return dbOptions; + } + + @Override + public DBOptions optimizeForSmallDb() { + optimizeForSmallDb(nativeHandle_); + return this; + } + + @Override + public DBOptions setIncreaseParallelism( + final int totalThreads) { + assert(isOwningHandle()); + setIncreaseParallelism(nativeHandle_, totalThreads); + return this; + } + + @Override + public DBOptions setCreateIfMissing(final boolean flag) { + assert(isOwningHandle()); + setCreateIfMissing(nativeHandle_, flag); + return this; + } + + @Override + public boolean createIfMissing() { + assert(isOwningHandle()); + return createIfMissing(nativeHandle_); + } + + @Override + public DBOptions setCreateMissingColumnFamilies( + final boolean flag) { + assert(isOwningHandle()); + setCreateMissingColumnFamilies(nativeHandle_, flag); + return this; + } + + @Override + public boolean createMissingColumnFamilies() { + assert(isOwningHandle()); + return createMissingColumnFamilies(nativeHandle_); + } + + @Override + public DBOptions setErrorIfExists( + final boolean errorIfExists) { + assert(isOwningHandle()); + setErrorIfExists(nativeHandle_, errorIfExists); + return this; + } + + @Override + public boolean errorIfExists() { + assert(isOwningHandle()); + return errorIfExists(nativeHandle_); + } + + @Override + public DBOptions setParanoidChecks( + final boolean paranoidChecks) { + assert(isOwningHandle()); + setParanoidChecks(nativeHandle_, paranoidChecks); + return this; + } + + @Override + public boolean paranoidChecks() { + assert(isOwningHandle()); + return paranoidChecks(nativeHandle_); + } + + @Override + public DBOptions setEnv(final Env env) { + setEnv(nativeHandle_, env.nativeHandle_); + this.env_ = env; + return this; + } + + @Override + public Env getEnv() { + return env_; + } + + @Override + public DBOptions setRateLimiter(final RateLimiter rateLimiter) { + assert(isOwningHandle()); + rateLimiter_ = rateLimiter; + setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_); + return this; + } + + @Override + public DBOptions setSstFileManager(final SstFileManager sstFileManager) { + assert(isOwningHandle()); + setSstFileManager(nativeHandle_, sstFileManager.nativeHandle_); + return this; + } + + @Override + public DBOptions setLogger(final Logger logger) { + assert(isOwningHandle()); + setLogger(nativeHandle_, logger.nativeHandle_); + return this; + } + + @Override + public DBOptions setInfoLogLevel( + final InfoLogLevel infoLogLevel) { + assert(isOwningHandle()); + setInfoLogLevel(nativeHandle_, infoLogLevel.getValue()); + return this; + } + + @Override + public InfoLogLevel infoLogLevel() { + assert(isOwningHandle()); + return InfoLogLevel.getInfoLogLevel( + infoLogLevel(nativeHandle_)); + } + + @Override + public DBOptions setMaxOpenFiles( + final int maxOpenFiles) { + assert(isOwningHandle()); + setMaxOpenFiles(nativeHandle_, maxOpenFiles); + return this; + } + + @Override + public int maxOpenFiles() { + assert(isOwningHandle()); + return 
maxOpenFiles(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setMaxFileOpeningThreads(final int maxFileOpeningThreads) {
+    assert(isOwningHandle());
+    setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads);
+    return this;
+  }
+
+  @Override
+  public int maxFileOpeningThreads() {
+    assert(isOwningHandle());
+    return maxFileOpeningThreads(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setMaxTotalWalSize(
+      final long maxTotalWalSize) {
+    assert(isOwningHandle());
+    setMaxTotalWalSize(nativeHandle_, maxTotalWalSize);
+    return this;
+  }
+
+  @Override
+  public long maxTotalWalSize() {
+    assert(isOwningHandle());
+    return maxTotalWalSize(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setStatistics(final Statistics statistics) {
+    assert(isOwningHandle());
+    setStatistics(nativeHandle_, statistics.nativeHandle_);
+    return this;
+  }
+
+  @Override
+  public Statistics statistics() {
+    assert(isOwningHandle());
+    final long statisticsNativeHandle = statistics(nativeHandle_);
+    if(statisticsNativeHandle == 0) {
+      return null;
+    } else {
+      return new Statistics(statisticsNativeHandle);
+    }
+  }
+
+  @Override
+  public DBOptions setUseFsync(
+      final boolean useFsync) {
+    assert(isOwningHandle());
+    setUseFsync(nativeHandle_, useFsync);
+    return this;
+  }
+
+  @Override
+  public boolean useFsync() {
+    assert(isOwningHandle());
+    return useFsync(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setDbPaths(final Collection<DbPath> dbPaths) {
+    assert(isOwningHandle());
+
+    final int len = dbPaths.size();
+    final String[] paths = new String[len];
+    final long[] targetSizes = new long[len];
+
+    int i = 0;
+    for(final DbPath dbPath : dbPaths) {
+      paths[i] = dbPath.path.toString();
+      targetSizes[i] = dbPath.targetSize;
+      i++;
+    }
+    setDbPaths(nativeHandle_, paths, targetSizes);
+    return this;
+  }
+
+  @Override
+  public List<DbPath> dbPaths() {
+    final int len = (int)dbPathsLen(nativeHandle_);
+    if(len == 0) {
+      return Collections.emptyList();
+    } else {
+      final String[] paths = new String[len];
+      final long[] targetSizes = new long[len];
+
+      dbPaths(nativeHandle_, paths, targetSizes);
+
+      final List<DbPath> dbPaths = new ArrayList<>();
+      for(int i = 0; i < len; i++) {
+        dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i]));
+      }
+      return dbPaths;
+    }
+  }
+
+  @Override
+  public DBOptions setDbLogDir(
+      final String dbLogDir) {
+    assert(isOwningHandle());
+    setDbLogDir(nativeHandle_, dbLogDir);
+    return this;
+  }
+
+  @Override
+  public String dbLogDir() {
+    assert(isOwningHandle());
+    return dbLogDir(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setWalDir(
+      final String walDir) {
+    assert(isOwningHandle());
+    setWalDir(nativeHandle_, walDir);
+    return this;
+  }
+
+  @Override
+  public String walDir() {
+    assert(isOwningHandle());
+    return walDir(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setDeleteObsoleteFilesPeriodMicros(
+      final long micros) {
+    assert(isOwningHandle());
+    setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros);
+    return this;
+  }
+
+  @Override
+  public long deleteObsoleteFilesPeriodMicros() {
+    assert(isOwningHandle());
+    return deleteObsoleteFilesPeriodMicros(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setMaxBackgroundJobs(final int maxBackgroundJobs) {
+    assert(isOwningHandle());
+    setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs);
+    return this;
+  }
+
+  @Override
+  public int maxBackgroundJobs() {
+    assert(isOwningHandle());
+    return maxBackgroundJobs(nativeHandle_);
+  }
+
+  @Override
+  @Deprecated
+  public DBOptions setMaxBackgroundCompactions(
+
final int maxBackgroundCompactions) { + assert(isOwningHandle()); + setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); + return this; + } + + @Override + @Deprecated + public int maxBackgroundCompactions() { + assert(isOwningHandle()); + return maxBackgroundCompactions(nativeHandle_); + } + + @Override + public DBOptions setMaxSubcompactions(final int maxSubcompactions) { + assert(isOwningHandle()); + setMaxSubcompactions(nativeHandle_, maxSubcompactions); + return this; + } + + @Override + public int maxSubcompactions() { + assert(isOwningHandle()); + return maxSubcompactions(nativeHandle_); + } + + @Override + @Deprecated + public DBOptions setMaxBackgroundFlushes( + final int maxBackgroundFlushes) { + assert(isOwningHandle()); + setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes); + return this; + } + + @Override + @Deprecated + public int maxBackgroundFlushes() { + assert(isOwningHandle()); + return maxBackgroundFlushes(nativeHandle_); + } + + @Override + public DBOptions setMaxLogFileSize(final long maxLogFileSize) { + assert(isOwningHandle()); + setMaxLogFileSize(nativeHandle_, maxLogFileSize); + return this; + } + + @Override + public long maxLogFileSize() { + assert(isOwningHandle()); + return maxLogFileSize(nativeHandle_); + } + + @Override + public DBOptions setLogFileTimeToRoll( + final long logFileTimeToRoll) { + assert(isOwningHandle()); + setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll); + return this; + } + + @Override + public long logFileTimeToRoll() { + assert(isOwningHandle()); + return logFileTimeToRoll(nativeHandle_); + } + + @Override + public DBOptions setKeepLogFileNum( + final long keepLogFileNum) { + assert(isOwningHandle()); + setKeepLogFileNum(nativeHandle_, keepLogFileNum); + return this; + } + + @Override + public long keepLogFileNum() { + assert(isOwningHandle()); + return keepLogFileNum(nativeHandle_); + } + + @Override + public DBOptions setRecycleLogFileNum(final long recycleLogFileNum) { + assert(isOwningHandle()); + setRecycleLogFileNum(nativeHandle_, recycleLogFileNum); + return this; + } + + @Override + public long recycleLogFileNum() { + assert(isOwningHandle()); + return recycleLogFileNum(nativeHandle_); + } + + @Override + public DBOptions setMaxManifestFileSize( + final long maxManifestFileSize) { + assert(isOwningHandle()); + setMaxManifestFileSize(nativeHandle_, maxManifestFileSize); + return this; + } + + @Override + public long maxManifestFileSize() { + assert(isOwningHandle()); + return maxManifestFileSize(nativeHandle_); + } + + @Override + public DBOptions setTableCacheNumshardbits( + final int tableCacheNumshardbits) { + assert(isOwningHandle()); + setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits); + return this; + } + + @Override + public int tableCacheNumshardbits() { + assert(isOwningHandle()); + return tableCacheNumshardbits(nativeHandle_); + } + + @Override + public DBOptions setWalTtlSeconds( + final long walTtlSeconds) { + assert(isOwningHandle()); + setWalTtlSeconds(nativeHandle_, walTtlSeconds); + return this; + } + + @Override + public long walTtlSeconds() { + assert(isOwningHandle()); + return walTtlSeconds(nativeHandle_); + } + + @Override + public DBOptions setWalSizeLimitMB( + final long sizeLimitMB) { + assert(isOwningHandle()); + setWalSizeLimitMB(nativeHandle_, sizeLimitMB); + return this; + } + + @Override + public long walSizeLimitMB() { + assert(isOwningHandle()); + return walSizeLimitMB(nativeHandle_); + } + + @Override + public DBOptions 
setMaxWriteBatchGroupSizeBytes(final long maxWriteBatchGroupSizeBytes) { + setMaxWriteBatchGroupSizeBytes(nativeHandle_, maxWriteBatchGroupSizeBytes); + return this; + } + + @Override + public long maxWriteBatchGroupSizeBytes() { + assert (isOwningHandle()); + return maxWriteBatchGroupSizeBytes(nativeHandle_); + } + + @Override + public DBOptions setManifestPreallocationSize( + final long size) { + assert(isOwningHandle()); + setManifestPreallocationSize(nativeHandle_, size); + return this; + } + + @Override + public long manifestPreallocationSize() { + assert(isOwningHandle()); + return manifestPreallocationSize(nativeHandle_); + } + + @Override + public DBOptions setAllowMmapReads( + final boolean allowMmapReads) { + assert(isOwningHandle()); + setAllowMmapReads(nativeHandle_, allowMmapReads); + return this; + } + + @Override + public boolean allowMmapReads() { + assert(isOwningHandle()); + return allowMmapReads(nativeHandle_); + } + + @Override + public DBOptions setAllowMmapWrites( + final boolean allowMmapWrites) { + assert(isOwningHandle()); + setAllowMmapWrites(nativeHandle_, allowMmapWrites); + return this; + } + + @Override + public boolean allowMmapWrites() { + assert(isOwningHandle()); + return allowMmapWrites(nativeHandle_); + } + + @Override + public DBOptions setUseDirectReads( + final boolean useDirectReads) { + assert(isOwningHandle()); + setUseDirectReads(nativeHandle_, useDirectReads); + return this; + } + + @Override + public boolean useDirectReads() { + assert(isOwningHandle()); + return useDirectReads(nativeHandle_); + } + + @Override + public DBOptions setUseDirectIoForFlushAndCompaction( + final boolean useDirectIoForFlushAndCompaction) { + assert(isOwningHandle()); + setUseDirectIoForFlushAndCompaction(nativeHandle_, + useDirectIoForFlushAndCompaction); + return this; + } + + @Override + public boolean useDirectIoForFlushAndCompaction() { + assert(isOwningHandle()); + return useDirectIoForFlushAndCompaction(nativeHandle_); + } + + @Override + public DBOptions setAllowFAllocate(final boolean allowFAllocate) { + assert(isOwningHandle()); + setAllowFAllocate(nativeHandle_, allowFAllocate); + return this; + } + + @Override + public boolean allowFAllocate() { + assert(isOwningHandle()); + return allowFAllocate(nativeHandle_); + } + + @Override + public DBOptions setIsFdCloseOnExec( + final boolean isFdCloseOnExec) { + assert(isOwningHandle()); + setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec); + return this; + } + + @Override + public boolean isFdCloseOnExec() { + assert(isOwningHandle()); + return isFdCloseOnExec(nativeHandle_); + } + + @Override + public DBOptions setStatsDumpPeriodSec( + final int statsDumpPeriodSec) { + assert(isOwningHandle()); + setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec); + return this; + } + + @Override + public int statsDumpPeriodSec() { + assert(isOwningHandle()); + return statsDumpPeriodSec(nativeHandle_); + } + + @Override + public DBOptions setStatsPersistPeriodSec( + final int statsPersistPeriodSec) { + assert(isOwningHandle()); + setStatsPersistPeriodSec(nativeHandle_, statsPersistPeriodSec); + return this; + } + + @Override + public int statsPersistPeriodSec() { + assert(isOwningHandle()); + return statsPersistPeriodSec(nativeHandle_); + } + + @Override + public DBOptions setStatsHistoryBufferSize( + final long statsHistoryBufferSize) { + assert(isOwningHandle()); + setStatsHistoryBufferSize(nativeHandle_, statsHistoryBufferSize); + return this; + } + + @Override + public long statsHistoryBufferSize() { + 
assert(isOwningHandle()); + return statsHistoryBufferSize(nativeHandle_); + } + + @Override + public DBOptions setAdviseRandomOnOpen( + final boolean adviseRandomOnOpen) { + assert(isOwningHandle()); + setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen); + return this; + } + + @Override + public boolean adviseRandomOnOpen() { + return adviseRandomOnOpen(nativeHandle_); + } + + @Override + public DBOptions setDbWriteBufferSize(final long dbWriteBufferSize) { + assert(isOwningHandle()); + setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize); + return this; + } + + @Override + public DBOptions setWriteBufferManager(final WriteBufferManager writeBufferManager) { + assert(isOwningHandle()); + setWriteBufferManager(nativeHandle_, writeBufferManager.nativeHandle_); + this.writeBufferManager_ = writeBufferManager; + return this; + } + + @Override + public WriteBufferManager writeBufferManager() { + assert(isOwningHandle()); + return this.writeBufferManager_; + } + + @Override + public long dbWriteBufferSize() { + assert(isOwningHandle()); + return dbWriteBufferSize(nativeHandle_); + } + + @Override + public DBOptions setAccessHintOnCompactionStart(final AccessHint accessHint) { + assert(isOwningHandle()); + setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue()); + return this; + } + + @Override + public AccessHint accessHintOnCompactionStart() { + assert(isOwningHandle()); + return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_)); + } + + @Override + public DBOptions setCompactionReadaheadSize(final long compactionReadaheadSize) { + assert(isOwningHandle()); + setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize); + return this; + } + + @Override + public long compactionReadaheadSize() { + assert(isOwningHandle()); + return compactionReadaheadSize(nativeHandle_); + } + + @Override + public DBOptions setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) { + assert(isOwningHandle()); + setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize); + return this; + } + + @Override + public long randomAccessMaxBufferSize() { + assert(isOwningHandle()); + return randomAccessMaxBufferSize(nativeHandle_); + } + + @Override + public DBOptions setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) { + assert(isOwningHandle()); + setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize); + return this; + } + + @Override + public long writableFileMaxBufferSize() { + assert(isOwningHandle()); + return writableFileMaxBufferSize(nativeHandle_); + } + + @Override + public DBOptions setUseAdaptiveMutex( + final boolean useAdaptiveMutex) { + assert(isOwningHandle()); + setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); + return this; + } + + @Override + public boolean useAdaptiveMutex() { + assert(isOwningHandle()); + return useAdaptiveMutex(nativeHandle_); + } + + @Override + public DBOptions setBytesPerSync( + final long bytesPerSync) { + assert(isOwningHandle()); + setBytesPerSync(nativeHandle_, bytesPerSync); + return this; + } + + @Override + public long bytesPerSync() { + return bytesPerSync(nativeHandle_); + } + + @Override + public DBOptions setWalBytesPerSync(final long walBytesPerSync) { + assert(isOwningHandle()); + setWalBytesPerSync(nativeHandle_, walBytesPerSync); + return this; + } + + @Override + public long walBytesPerSync() { + assert(isOwningHandle()); + return walBytesPerSync(nativeHandle_); + } + + @Override + public DBOptions setStrictBytesPerSync(final boolean strictBytesPerSync) { + 
assert(isOwningHandle());
+    setStrictBytesPerSync(nativeHandle_, strictBytesPerSync);
+    return this;
+  }
+
+  @Override
+  public boolean strictBytesPerSync() {
+    assert(isOwningHandle());
+    return strictBytesPerSync(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setListeners(final List<AbstractEventListener> listeners) {
+    assert (isOwningHandle());
+    setEventListeners(nativeHandle_, RocksCallbackObject.toNativeHandleList(listeners));
+    return this;
+  }
+
+  @Override
+  public List<AbstractEventListener> listeners() {
+    assert (isOwningHandle());
+    return Arrays.asList(eventListeners(nativeHandle_));
+  }
+
+  @Override
+  public DBOptions setEnableThreadTracking(final boolean enableThreadTracking) {
+    assert(isOwningHandle());
+    setEnableThreadTracking(nativeHandle_, enableThreadTracking);
+    return this;
+  }
+
+  @Override
+  public boolean enableThreadTracking() {
+    assert(isOwningHandle());
+    return enableThreadTracking(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setDelayedWriteRate(final long delayedWriteRate) {
+    assert(isOwningHandle());
+    setDelayedWriteRate(nativeHandle_, delayedWriteRate);
+    return this;
+  }
+
+  @Override
+  public long delayedWriteRate() {
+    return delayedWriteRate(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setEnablePipelinedWrite(final boolean enablePipelinedWrite) {
+    assert(isOwningHandle());
+    setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite);
+    return this;
+  }
+
+  @Override
+  public boolean enablePipelinedWrite() {
+    assert(isOwningHandle());
+    return enablePipelinedWrite(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setUnorderedWrite(final boolean unorderedWrite) {
+    setUnorderedWrite(nativeHandle_, unorderedWrite);
+    return this;
+  }
+
+  @Override
+  public boolean unorderedWrite() {
+    return unorderedWrite(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setAllowConcurrentMemtableWrite(
+      final boolean allowConcurrentMemtableWrite) {
+    setAllowConcurrentMemtableWrite(nativeHandle_,
+        allowConcurrentMemtableWrite);
+    return this;
+  }
+
+  @Override
+  public boolean allowConcurrentMemtableWrite() {
+    return allowConcurrentMemtableWrite(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setEnableWriteThreadAdaptiveYield(
+      final boolean enableWriteThreadAdaptiveYield) {
+    setEnableWriteThreadAdaptiveYield(nativeHandle_,
+        enableWriteThreadAdaptiveYield);
+    return this;
+  }
+
+  @Override
+  public boolean enableWriteThreadAdaptiveYield() {
+    return enableWriteThreadAdaptiveYield(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) {
+    setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec);
+    return this;
+  }
+
+  @Override
+  public long writeThreadMaxYieldUsec() {
+    return writeThreadMaxYieldUsec(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) {
+    setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec);
+    return this;
+  }
+
+  @Override
+  public long writeThreadSlowYieldUsec() {
+    return writeThreadSlowYieldUsec(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) {
+    assert(isOwningHandle());
+    setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen);
+    return this;
+  }
+
+  @Override
+  public boolean skipStatsUpdateOnDbOpen() {
+    assert(isOwningHandle());
+    return skipStatsUpdateOnDbOpen(nativeHandle_);
+  }
+
+  @Override
+  public DBOptions setSkipCheckingSstFileSizesOnDbOpen(
+      final boolean skipCheckingSstFileSizesOnDbOpen) {
+
setSkipCheckingSstFileSizesOnDbOpen(nativeHandle_, skipCheckingSstFileSizesOnDbOpen); + return this; + } + + @Override + public boolean skipCheckingSstFileSizesOnDbOpen() { + assert (isOwningHandle()); + return skipCheckingSstFileSizesOnDbOpen(nativeHandle_); + } + + @Override + public DBOptions setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) { + assert(isOwningHandle()); + setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue()); + return this; + } + + @Override + public WALRecoveryMode walRecoveryMode() { + assert(isOwningHandle()); + return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_)); + } + + @Override + public DBOptions setAllow2pc(final boolean allow2pc) { + assert(isOwningHandle()); + setAllow2pc(nativeHandle_, allow2pc); + return this; + } + + @Override + public boolean allow2pc() { + assert(isOwningHandle()); + return allow2pc(nativeHandle_); + } + + @Override + public DBOptions setRowCache(final Cache rowCache) { + assert(isOwningHandle()); + setRowCache(nativeHandle_, rowCache.nativeHandle_); + this.rowCache_ = rowCache; + return this; + } + + @Override + public Cache rowCache() { + assert(isOwningHandle()); + return this.rowCache_; + } + + @Override + public DBOptions setWalFilter(final AbstractWalFilter walFilter) { + assert(isOwningHandle()); + setWalFilter(nativeHandle_, walFilter.nativeHandle_); + this.walFilter_ = walFilter; + return this; + } + + @Override + public WalFilter walFilter() { + assert(isOwningHandle()); + return this.walFilter_; + } + + @Override + public DBOptions setFailIfOptionsFileError(final boolean failIfOptionsFileError) { + assert(isOwningHandle()); + setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError); + return this; + } + + @Override + public boolean failIfOptionsFileError() { + assert(isOwningHandle()); + return failIfOptionsFileError(nativeHandle_); + } + + @Override + public DBOptions setDumpMallocStats(final boolean dumpMallocStats) { + assert(isOwningHandle()); + setDumpMallocStats(nativeHandle_, dumpMallocStats); + return this; + } + + @Override + public boolean dumpMallocStats() { + assert(isOwningHandle()); + return dumpMallocStats(nativeHandle_); + } + + @Override + public DBOptions setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) { + assert(isOwningHandle()); + setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery); + return this; + } + + @Override + public boolean avoidFlushDuringRecovery() { + assert(isOwningHandle()); + return avoidFlushDuringRecovery(nativeHandle_); + } + + @Override + public DBOptions setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) { + assert(isOwningHandle()); + setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown); + return this; + } + + @Override + public boolean avoidFlushDuringShutdown() { + assert(isOwningHandle()); + return avoidFlushDuringShutdown(nativeHandle_); + } + + @Override + public DBOptions setAllowIngestBehind(final boolean allowIngestBehind) { + assert(isOwningHandle()); + setAllowIngestBehind(nativeHandle_, allowIngestBehind); + return this; + } + + @Override + public boolean allowIngestBehind() { + assert(isOwningHandle()); + return allowIngestBehind(nativeHandle_); + } + + @Override + public DBOptions setTwoWriteQueues(final boolean twoWriteQueues) { + assert(isOwningHandle()); + setTwoWriteQueues(nativeHandle_, twoWriteQueues); + return this; + } + + @Override + public boolean twoWriteQueues() { + assert(isOwningHandle()); + return twoWriteQueues(nativeHandle_); + } + + @Override 
+ public DBOptions setManualWalFlush(final boolean manualWalFlush) { + assert(isOwningHandle()); + setManualWalFlush(nativeHandle_, manualWalFlush); + return this; + } + + @Override + public boolean manualWalFlush() { + assert(isOwningHandle()); + return manualWalFlush(nativeHandle_); + } + + @Override + public DBOptions setAtomicFlush(final boolean atomicFlush) { + setAtomicFlush(nativeHandle_, atomicFlush); + return this; + } + + @Override + public boolean atomicFlush() { + return atomicFlush(nativeHandle_); + } + + @Override + public DBOptions setAvoidUnnecessaryBlockingIO(final boolean avoidUnnecessaryBlockingIO) { + setAvoidUnnecessaryBlockingIO(nativeHandle_, avoidUnnecessaryBlockingIO); + return this; + } + + @Override + public boolean avoidUnnecessaryBlockingIO() { + assert (isOwningHandle()); + return avoidUnnecessaryBlockingIO(nativeHandle_); + } + + @Override + public DBOptions setPersistStatsToDisk(final boolean persistStatsToDisk) { + setPersistStatsToDisk(nativeHandle_, persistStatsToDisk); + return this; + } + + @Override + public boolean persistStatsToDisk() { + assert (isOwningHandle()); + return persistStatsToDisk(nativeHandle_); + } + + @Override + public DBOptions setWriteDbidToManifest(final boolean writeDbidToManifest) { + setWriteDbidToManifest(nativeHandle_, writeDbidToManifest); + return this; + } + + @Override + public boolean writeDbidToManifest() { + assert (isOwningHandle()); + return writeDbidToManifest(nativeHandle_); + } + + @Override + public DBOptions setLogReadaheadSize(final long logReadaheadSize) { + setLogReadaheadSize(nativeHandle_, logReadaheadSize); + return this; + } + + @Override + public long logReadaheadSize() { + assert (isOwningHandle()); + return logReadaheadSize(nativeHandle_); + } + + @Override + public DBOptions setBestEffortsRecovery(final boolean bestEffortsRecovery) { + setBestEffortsRecovery(nativeHandle_, bestEffortsRecovery); + return this; + } + + @Override + public boolean bestEffortsRecovery() { + assert (isOwningHandle()); + return bestEffortsRecovery(nativeHandle_); + } + + @Override + public DBOptions setMaxBgErrorResumeCount(final int maxBgerrorResumeCount) { + setMaxBgErrorResumeCount(nativeHandle_, maxBgerrorResumeCount); + return this; + } + + @Override + public int maxBgerrorResumeCount() { + assert (isOwningHandle()); + return maxBgerrorResumeCount(nativeHandle_); + } + + @Override + public DBOptions setBgerrorResumeRetryInterval(final long bgerrorResumeRetryInterval) { + setBgerrorResumeRetryInterval(nativeHandle_, bgerrorResumeRetryInterval); + return this; + } + + @Override + public long bgerrorResumeRetryInterval() { + assert (isOwningHandle()); + return bgerrorResumeRetryInterval(nativeHandle_); + } + + static final int DEFAULT_NUM_SHARD_BITS = -1; + + + + + /** + *
+   * <p>Private constructor to be used by
+   * {@link #getDBOptionsFromProps(java.util.Properties)}</p>
+ * + * @param nativeHandle native handle to DBOptions instance. + */ + private DBOptions(final long nativeHandle) { + super(nativeHandle); + } + + private static native long getDBOptionsFromProps(long cfgHandle, String optString); + private static native long getDBOptionsFromProps(String optString); + + private static native long newDBOptions(); + private static native long copyDBOptions(final long handle); + private static native long newDBOptionsFromOptions(final long optionsHandle); + @Override protected final native void disposeInternal(final long handle); + + private native void optimizeForSmallDb(final long handle); + private native void setIncreaseParallelism(long handle, int totalThreads); + private native void setCreateIfMissing(long handle, boolean flag); + private native boolean createIfMissing(long handle); + private native void setCreateMissingColumnFamilies( + long handle, boolean flag); + private native boolean createMissingColumnFamilies(long handle); + private native void setEnv(long handle, long envHandle); + private native void setErrorIfExists(long handle, boolean errorIfExists); + private native boolean errorIfExists(long handle); + private native void setParanoidChecks( + long handle, boolean paranoidChecks); + private native boolean paranoidChecks(long handle); + private native void setRateLimiter(long handle, + long rateLimiterHandle); + private native void setSstFileManager(final long handle, + final long sstFileManagerHandle); + private native void setLogger(long handle, + long loggerHandle); + private native void setInfoLogLevel(long handle, byte logLevel); + private native byte infoLogLevel(long handle); + private native void setMaxOpenFiles(long handle, int maxOpenFiles); + private native int maxOpenFiles(long handle); + private native void setMaxFileOpeningThreads(final long handle, + final int maxFileOpeningThreads); + private native int maxFileOpeningThreads(final long handle); + private native void setMaxTotalWalSize(long handle, + long maxTotalWalSize); + private native long maxTotalWalSize(long handle); + private native void setStatistics(final long handle, final long statisticsHandle); + private native long statistics(final long handle); + private native boolean useFsync(long handle); + private native void setUseFsync(long handle, boolean useFsync); + private native void setDbPaths(final long handle, final String[] paths, + final long[] targetSizes); + private native long dbPathsLen(final long handle); + private native void dbPaths(final long handle, final String[] paths, + final long[] targetSizes); + private native void setDbLogDir(long handle, String dbLogDir); + private native String dbLogDir(long handle); + private native void setWalDir(long handle, String walDir); + private native String walDir(long handle); + private native void setDeleteObsoleteFilesPeriodMicros( + long handle, long micros); + private native long deleteObsoleteFilesPeriodMicros(long handle); + private native void setMaxBackgroundCompactions( + long handle, int maxBackgroundCompactions); + private native int maxBackgroundCompactions(long handle); + private native void setMaxSubcompactions(long handle, int maxSubcompactions); + private native int maxSubcompactions(long handle); + private native void setMaxBackgroundFlushes( + long handle, int maxBackgroundFlushes); + private native int maxBackgroundFlushes(long handle); + private native void setMaxBackgroundJobs(long handle, int maxBackgroundJobs); + private native int maxBackgroundJobs(long handle); + private native void 
setMaxLogFileSize(long handle, long maxLogFileSize) + throws IllegalArgumentException; + private native long maxLogFileSize(long handle); + private native void setLogFileTimeToRoll( + long handle, long logFileTimeToRoll) throws IllegalArgumentException; + private native long logFileTimeToRoll(long handle); + private native void setKeepLogFileNum(long handle, long keepLogFileNum) + throws IllegalArgumentException; + private native long keepLogFileNum(long handle); + private native void setRecycleLogFileNum(long handle, long recycleLogFileNum); + private native long recycleLogFileNum(long handle); + private native void setMaxManifestFileSize( + long handle, long maxManifestFileSize); + private native long maxManifestFileSize(long handle); + private native void setTableCacheNumshardbits( + long handle, int tableCacheNumshardbits); + private native int tableCacheNumshardbits(long handle); + private native void setWalTtlSeconds(long handle, long walTtlSeconds); + private native long walTtlSeconds(long handle); + private native void setWalSizeLimitMB(long handle, long sizeLimitMB); + private native long walSizeLimitMB(long handle); + private static native void setMaxWriteBatchGroupSizeBytes( + final long handle, final long maxWriteBatchGroupSizeBytes); + private static native long maxWriteBatchGroupSizeBytes(final long handle); + private native void setManifestPreallocationSize( + long handle, long size) throws IllegalArgumentException; + private native long manifestPreallocationSize(long handle); + private native void setUseDirectReads(long handle, boolean useDirectReads); + private native boolean useDirectReads(long handle); + private native void setUseDirectIoForFlushAndCompaction( + long handle, boolean useDirectIoForFlushAndCompaction); + private native boolean useDirectIoForFlushAndCompaction(long handle); + private native void setAllowFAllocate(final long handle, + final boolean allowFAllocate); + private native boolean allowFAllocate(final long handle); + private native void setAllowMmapReads( + long handle, boolean allowMmapReads); + private native boolean allowMmapReads(long handle); + private native void setAllowMmapWrites( + long handle, boolean allowMmapWrites); + private native boolean allowMmapWrites(long handle); + private native void setIsFdCloseOnExec( + long handle, boolean isFdCloseOnExec); + private native boolean isFdCloseOnExec(long handle); + private native void setStatsDumpPeriodSec( + long handle, int statsDumpPeriodSec); + private native int statsDumpPeriodSec(long handle); + private native void setStatsPersistPeriodSec( + final long handle, final int statsPersistPeriodSec); + private native int statsPersistPeriodSec( + final long handle); + private native void setStatsHistoryBufferSize( + final long handle, final long statsHistoryBufferSize); + private native long statsHistoryBufferSize( + final long handle); + private native void setAdviseRandomOnOpen( + long handle, boolean adviseRandomOnOpen); + private native boolean adviseRandomOnOpen(long handle); + private native void setDbWriteBufferSize(final long handle, + final long dbWriteBufferSize); + private native void setWriteBufferManager(final long dbOptionsHandle, + final long writeBufferManagerHandle); + private native long dbWriteBufferSize(final long handle); + private native void setAccessHintOnCompactionStart(final long handle, + final byte accessHintOnCompactionStart); + private native byte accessHintOnCompactionStart(final long handle); + private native void setCompactionReadaheadSize(final long handle, + 
final long compactionReadaheadSize); + private native long compactionReadaheadSize(final long handle); + private native void setRandomAccessMaxBufferSize(final long handle, + final long randomAccessMaxBufferSize); + private native long randomAccessMaxBufferSize(final long handle); + private native void setWritableFileMaxBufferSize(final long handle, + final long writableFileMaxBufferSize); + private native long writableFileMaxBufferSize(final long handle); + private native void setUseAdaptiveMutex( + long handle, boolean useAdaptiveMutex); + private native boolean useAdaptiveMutex(long handle); + private native void setBytesPerSync( + long handle, long bytesPerSync); + private native long bytesPerSync(long handle); + private native void setWalBytesPerSync(long handle, long walBytesPerSync); + private native long walBytesPerSync(long handle); + private native void setStrictBytesPerSync( + final long handle, final boolean strictBytesPerSync); + private native boolean strictBytesPerSync( + final long handle); + private static native void setEventListeners( + final long handle, final long[] eventListenerHandles); + private static native AbstractEventListener[] eventListeners(final long handle); + private native void setEnableThreadTracking(long handle, + boolean enableThreadTracking); + private native boolean enableThreadTracking(long handle); + private native void setDelayedWriteRate(long handle, long delayedWriteRate); + private native long delayedWriteRate(long handle); + private native void setEnablePipelinedWrite(final long handle, + final boolean enablePipelinedWrite); + private native boolean enablePipelinedWrite(final long handle); + private native void setUnorderedWrite(final long handle, + final boolean unorderedWrite); + private native boolean unorderedWrite(final long handle); + private native void setAllowConcurrentMemtableWrite(long handle, + boolean allowConcurrentMemtableWrite); + private native boolean allowConcurrentMemtableWrite(long handle); + private native void setEnableWriteThreadAdaptiveYield(long handle, + boolean enableWriteThreadAdaptiveYield); + private native boolean enableWriteThreadAdaptiveYield(long handle); + private native void setWriteThreadMaxYieldUsec(long handle, + long writeThreadMaxYieldUsec); + private native long writeThreadMaxYieldUsec(long handle); + private native void setWriteThreadSlowYieldUsec(long handle, + long writeThreadSlowYieldUsec); + private native long writeThreadSlowYieldUsec(long handle); + private native void setSkipStatsUpdateOnDbOpen(final long handle, + final boolean skipStatsUpdateOnDbOpen); + private native boolean skipStatsUpdateOnDbOpen(final long handle); + private static native void setSkipCheckingSstFileSizesOnDbOpen( + final long handle, final boolean skipChecking); + private static native boolean skipCheckingSstFileSizesOnDbOpen(final long handle); + private native void setWalRecoveryMode(final long handle, + final byte walRecoveryMode); + private native byte walRecoveryMode(final long handle); + private native void setAllow2pc(final long handle, + final boolean allow2pc); + private native boolean allow2pc(final long handle); + private native void setRowCache(final long handle, + final long rowCacheHandle); + private native void setWalFilter(final long handle, + final long walFilterHandle); + private native void setFailIfOptionsFileError(final long handle, + final boolean failIfOptionsFileError); + private native boolean failIfOptionsFileError(final long handle); + private native void setDumpMallocStats(final long handle, 
+      final boolean dumpMallocStats);
+  private native boolean dumpMallocStats(final long handle);
+  private native void setAvoidFlushDuringRecovery(final long handle,
+      final boolean avoidFlushDuringRecovery);
+  private native boolean avoidFlushDuringRecovery(final long handle);
+  private native void setAvoidFlushDuringShutdown(final long handle,
+      final boolean avoidFlushDuringShutdown);
+  private native boolean avoidFlushDuringShutdown(final long handle);
+  private native void setAllowIngestBehind(final long handle,
+      final boolean allowIngestBehind);
+  private native boolean allowIngestBehind(final long handle);
+  private native void setTwoWriteQueues(final long handle,
+      final boolean twoWriteQueues);
+  private native boolean twoWriteQueues(final long handle);
+  private native void setManualWalFlush(final long handle,
+      final boolean manualWalFlush);
+  private native boolean manualWalFlush(final long handle);
+  private native void setAtomicFlush(final long handle,
+      final boolean atomicFlush);
+  private native boolean atomicFlush(final long handle);
+  private static native void setAvoidUnnecessaryBlockingIO(
+      final long handle, final boolean avoidBlockingIO);
+  private static native boolean avoidUnnecessaryBlockingIO(final long handle);
+  private static native void setPersistStatsToDisk(
+      final long handle, final boolean persistStatsToDisk);
+  private static native boolean persistStatsToDisk(final long handle);
+  private static native void setWriteDbidToManifest(
+      final long handle, final boolean writeDbidToManifest);
+  private static native boolean writeDbidToManifest(final long handle);
+  private static native void setLogReadaheadSize(final long handle, final long logReadaheadSize);
+  private static native long logReadaheadSize(final long handle);
+  private static native void setBestEffortsRecovery(
+      final long handle, final boolean bestEffortsRecovery);
+  private static native boolean bestEffortsRecovery(final long handle);
+  private static native void setMaxBgErrorResumeCount(
+      final long handle, final int maxBgerrorResumeCount);
+  private static native int maxBgerrorResumeCount(final long handle);
+  private static native void setBgerrorResumeRetryInterval(
+      final long handle, final long bgerrorResumeRetryInterval);
+  private static native long bgerrorResumeRetryInterval(final long handle);
+
+  // instance variables
+  // NOTE: If you add new member variables, please update the copy constructor above!
+  private Env env_;
+  private int numShardBits_;
+  private RateLimiter rateLimiter_;
+  private Cache rowCache_;
+  private WalFilter walFilter_;
+  private WriteBufferManager writeBufferManager_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java
new file mode 100644
index 000000000..ef1b86bff
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java
@@ -0,0 +1,1756 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Collection;
+import java.util.List;
+
+public interface DBOptionsInterface<T extends DBOptionsInterface<T>> {
+  /**
+   * Use this if your DB is very small (like under 1GB) and you don't want to
+   * spend lots of memory for memtables.
+   *
+   * @return the instance of the current object.
+ */ + T optimizeForSmallDb(); + + /** + * Use the specified object to interact with the environment, + * e.g. to read/write files, schedule background work, etc. + * Default: {@link Env#getDefault()} + * + * @param env {@link Env} instance. + * @return the instance of the current Options. + */ + T setEnv(final Env env); + + /** + * Returns the set RocksEnv instance. + * + * @return {@link RocksEnv} instance set in the options. + */ + Env getEnv(); + + /** + *
+   * <p>By default, RocksDB uses only one background thread for flush and
+   * compaction. Calling this function will set it up such that a total of
+   * {@code total_threads} threads is used.</p>
+   *
+   * <p>You almost definitely want to call this function if your system is
+   * bottlenecked by RocksDB.</p>
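+   *
+   * <p>A minimal sketch; sizing to the available core count follows the
+   * suggestion below and is not a requirement:</p>
+   * <pre>{@code
+   * try (DBOptions dbOpts = new DBOptions()
+   *          .setIncreaseParallelism(Runtime.getRuntime().availableProcessors())) {
+   *   // pass dbOpts to RocksDB.open(...)
+   * }
+   * }</pre>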
+   *
+   * @param totalThreads The total number of threads to be used by RocksDB.
+   *     A good value is the number of cores.
+   *
+   * @return the instance of the current Options
+   */
+  T setIncreaseParallelism(int totalThreads);
+
+  /**
+   * If this value is set to true, then the database will be created
+   * if it is missing during {@code RocksDB.open()}.
+   * Default: false
+   *
+   * @param flag a flag indicating whether to create the database if it is
+   *     missing when the {@link RocksDB#open(org.rocksdb.Options, String)}
+   *     operation is invoked.
+   * @return the instance of the current Options
+   * @see RocksDB#open(org.rocksdb.Options, String)
+   */
+  T setCreateIfMissing(boolean flag);
+
+  /**
+   * Return true if the create_if_missing flag is set to true.
+   * If true, the database will be created if it is missing.
+   *
+   * @return true if the createIfMissing option is set to true.
+   * @see #setCreateIfMissing(boolean)
+   */
+  boolean createIfMissing();
+
+  /**
+   * <p>If true, missing column families will be automatically created</p>
+   *
+   * <p>Default: false</p>
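+   *
+   * <p>One possible usage sketch, combining this flag with
+   * {@link #setCreateIfMissing(boolean)} when opening a database with
+   * column families (the descriptor names are illustrative):</p>
+   * <pre>{@code
+   * DBOptions dbOpts = new DBOptions()
+   *     .setCreateIfMissing(true)
+   *     .setCreateMissingColumnFamilies(true);
+   * // RocksDB.open(dbOpts, path, cfDescriptors, cfHandles) will now create
+   * // any column family in cfDescriptors that does not exist yet.
+   * }</pre>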
+   *
+   * @param flag a flag indicating if missing column families shall be
+   *     created automatically.
+   * @return the instance of the current Options
+   */
+  T setCreateMissingColumnFamilies(boolean flag);
+
+  /**
+   * Return true if the create_missing_column_families flag is set
+   * to true. If true, missing column families will be created on open.
+   *
+   * @return true if the createMissingColumnFamilies is set to
+   *     true.
+   * @see #setCreateMissingColumnFamilies(boolean)
+   */
+  boolean createMissingColumnFamilies();
+
+  /**
+   * If true, an error will be thrown during RocksDB.open() if the
+   * database already exists.
+   * Default: false
+   *
+   * @param errorIfExists if true, an exception will be thrown
+   *     during {@code RocksDB.open()} if the database already exists.
+   * @return the reference to the current option.
+   * @see RocksDB#open(org.rocksdb.Options, String)
+   */
+  T setErrorIfExists(boolean errorIfExists);
+
+  /**
+   * If true, an error will be thrown during RocksDB.open() if the
+   * database already exists.
+   *
+   * @return if true, an error is raised when the specified database
+   *     already exists before open.
+   */
+  boolean errorIfExists();
+
+  /**
+   * If true, the implementation will do aggressive checking of the
+   * data it is processing and will stop early if it detects any
+   * errors. This may have unforeseen ramifications: for example, a
+   * corruption of one DB entry may cause a large number of entries to
+   * become unreadable or for the entire DB to become unopenable.
+   * If any of the writes to the database fails (Put, Delete, Merge, Write),
+   * the database will switch to read-only mode and fail all other
+   * Write operations.
+   * Default: true
+   *
+   * @param paranoidChecks a flag to indicate whether paranoid-check
+   *     is on.
+   * @return the reference to the current option.
+   */
+  T setParanoidChecks(boolean paranoidChecks);
+
+  /**
+   * If true, the implementation will do aggressive checking of the
+   * data it is processing and will stop early if it detects any
+   * errors. This may have unforeseen ramifications: for example, a
+   * corruption of one DB entry may cause a large number of entries to
+   * become unreadable or for the entire DB to become unopenable.
+   * If any of the writes to the database fails (Put, Delete, Merge, Write),
+   * the database will switch to read-only mode and fail all other
+   * Write operations.
+   *
+   * @return a boolean indicating whether paranoid-check is on.
+   */
+  boolean paranoidChecks();
+
+  /**
+   * Use to control write rate of flush and compaction. Flush has higher
+   * priority than compaction. Rate limiting is disabled if nullptr.
+   * Default: nullptr
+   *
+   * @param rateLimiter {@link org.rocksdb.RateLimiter} instance.
+   * @return the instance of the current object.
+   *
+   * @since 3.10.0
+   */
+  T setRateLimiter(RateLimiter rateLimiter);
+
+  /**
+   * Use to track SST files and control their file deletion rate.
+   *
+   * Features:
+   *  - Throttle the deletion rate of the SST files.
+   *  - Keep track of the total size of all SST files.
+   *  - Set a maximum allowed space limit for SST files that, when reached,
+   *    the DB won't do any further flushes or compactions and will set the
+   *    background error.
+   *  - Can be shared between multiple dbs.
+   *
+   * Limitations:
+   *  - Only track and throttle deletes of SST files in
+   *    first db_path (db_name if db_paths is empty).
+   *
+   * @param sstFileManager The SST File Manager for the db.
+   * @return the instance of the current object.
+ */ + T setSstFileManager(SstFileManager sstFileManager); + + /** + *
+   * <p>Any internal progress/error information generated by
+   * the db will be written to the Logger if it is non-nullptr,
+   * or to a file stored in the same directory as the DB
+   * contents if info_log is nullptr.</p>
+   *
+   * <p>Default: nullptr</p>
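+   *
+   * <p>A sketch of redirecting the info log to a custom sink; the anonymous
+   * subclass here just forwards messages to stdout and is purely
+   * illustrative:</p>
+   * <pre>{@code
+   * try (DBOptions dbOpts = new DBOptions();
+   *      Logger logger = new Logger(dbOpts) {
+   *        @Override
+   *        protected void log(InfoLogLevel level, String msg) {
+   *          System.out.println(level + ": " + msg);
+   *        }
+   *      }) {
+   *   dbOpts.setLogger(logger);
+   * }
+   * }</pre>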
+ * + * @param logger {@link Logger} instance. + * @return the instance of the current object. + */ + T setLogger(Logger logger); + + /** + *
+   * <p>Sets the RocksDB log level. Default level is INFO</p>
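+   *
+   * <p>For example, to keep only warnings and above in the info log:</p>
+   * <pre>{@code
+   * dbOptions.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
+   * }</pre>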
+ * + * @param infoLogLevel log level to set. + * @return the instance of the current object. + */ + T setInfoLogLevel(InfoLogLevel infoLogLevel); + + /** + *
+   * <p>Returns the currently set log level.</p>
+ * @return {@link org.rocksdb.InfoLogLevel} instance. + */ + InfoLogLevel infoLogLevel(); + + /** + * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open + * all files on DB::Open(). You can use this option to increase the number + * of threads used to open the files. + * + * Default: 16 + * + * @param maxFileOpeningThreads the maximum number of threads to use to + * open files + * + * @return the reference to the current options. + */ + T setMaxFileOpeningThreads(int maxFileOpeningThreads); + + /** + * If {@link MutableDBOptionsInterface#maxOpenFiles()} is -1, DB will open all + * files on DB::Open(). You can use this option to increase the number of + * threads used to open the files. + * + * Default: 16 + * + * @return the maximum number of threads to use to open files + */ + int maxFileOpeningThreads(); + + /** + *
+   * <p>Sets the statistics object which collects metrics about database operations.
+   * Statistics objects should not be shared between DB instances as
+   * they do not use any locks to prevent concurrent updates.</p>
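+   *
+   * <p>Sketch of enabling statistics and reading a counter later; the ticker
+   * chosen is arbitrary:</p>
+   * <pre>{@code
+   * Statistics stats = new Statistics();
+   * DBOptions dbOpts = new DBOptions().setStatistics(stats);
+   * // ... after some database activity:
+   * long cacheMisses = stats.getTickerCount(TickerType.BLOCK_CACHE_MISS);
+   * }</pre>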
+ * + * @param statistics The statistics to set + * + * @return the instance of the current object. + * + * @see RocksDB#open(org.rocksdb.Options, String) + */ + T setStatistics(final Statistics statistics); + + /** + *
+   * <p>Returns the statistics object.</p>
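+   *
+   * <p>Since no statistics object exists unless one was previously set, a
+   * null check is advisable (illustrative):</p>
+   * <pre>{@code
+   * Statistics stats = dbOptions.statistics();
+   * if (stats != null) {
+   *   // statistics were enabled via setStatistics(...)
+   * }
+   * }</pre>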
+ * + * @return the instance of the statistics object or null if there is no + * statistics object. + * + * @see #setStatistics(Statistics) + */ + Statistics statistics(); + + /** + *
+   * <p>If true, then every store to stable storage will issue a fsync.</p>
+   *
+   * <p>If false, then every store to stable storage will issue a fdatasync.
+   * This parameter should be set to true while storing data to
+   * a filesystem like ext3 that can lose files after a reboot.</p>
+   *
+   * <p>Default: false</p>
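+   *
+   * <p>Illustrative configuration for a filesystem where fdatasync alone may
+   * not be durable enough:</p>
+   * <pre>{@code
+   * dbOptions.setUseFsync(true);
+   * }</pre>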
+ * + * @param useFsync a boolean flag to specify whether to use fsync + * @return the instance of the current object. + */ + T setUseFsync(boolean useFsync); + + /** + *
+   * <p>If true, then every store to stable storage will issue a fsync.</p>
+   *
+   * <p>If false, then every store to stable storage will issue a fdatasync.
+   * This parameter should be set to true while storing data to
+   * a filesystem like ext3 that can lose files after a reboot.</p>
+   *
+   * @return boolean value indicating if fsync is used.
+   */
+  boolean useFsync();
+
+  /**
+   * A list of paths where SST files can be put into, with their target sizes.
+   * Newer data is placed into paths specified earlier in the vector while
+   * older data gradually moves to paths specified later in the vector.
+   *
+   * For example, if you have a flash device with 10GB allocated for the DB,
+   * as well as a hard drive of 2TB, you should configure it to be:
+   *    [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
+   *
+   * The system will try to guarantee data under each path is close to but
+   * not larger than the target size. But current and future file sizes used
+   * when determining where to place a file are based on best-effort estimation,
+   * which means there is a chance that the actual size under the directory
+   * is slightly more than target size under some workloads. User should give
+   * some buffer room for those cases.
+   *
+   * If none of the paths has sufficient room to place a file, the file will
+   * be placed to the last path anyway, regardless of the target size.
+   *
+   * Placing newer data to earlier paths is also best-effort. User should
+   * expect user files to be placed in higher levels in some extreme cases.
+   *
+   * If left empty, only one path will be used, which is db_name passed when
+   * opening the DB.
+   *
+   * Default: empty
+   *
+   * @param dbPaths the paths and target sizes
+   *
+   * @return the reference to the current options
+   */
+  T setDbPaths(final Collection<DbPath> dbPaths);
+
+  /**
+   * A list of paths where SST files can be put into, with their target sizes.
+   * Newer data is placed into paths specified earlier in the vector while
+   * older data gradually moves to paths specified later in the vector.
+   *
+   * For example, if you have a flash device with 10GB allocated for the DB,
+   * as well as a hard drive of 2TB, you should configure it to be:
+   *    [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
+   *
+   * The system will try to guarantee data under each path is close to but
+   * not larger than the target size. But current and future file sizes used
+   * when determining where to place a file are based on best-effort estimation,
+   * which means there is a chance that the actual size under the directory
+   * is slightly more than target size under some workloads. User should give
+   * some buffer room for those cases.
+   *
+   * If none of the paths has sufficient room to place a file, the file will
+   * be placed to the last path anyway, regardless of the target size.
+   *
+   * Placing newer data to earlier paths is also best-effort. User should
+   * expect user files to be placed in higher levels in some extreme cases.
+   *
+   * If left empty, only one path will be used, which is db_name passed when
+   * opening the DB.
+   *
+   * Default: {@link java.util.Collections#emptyList()}
+   *
+   * @return dbPaths the paths and target sizes
+   */
+  List<DbPath> dbPaths();
+
+  /**
+   * This specifies the info LOG dir.
+   * If it is empty, the log files will be in the same dir as data.
+   * If it is non-empty, the log files will be in the specified dir,
+   * and the db data dir's absolute path will be used as the log file
+   * name's prefix.
+   *
+   * @param dbLogDir the path to the info log directory
+   * @return the instance of the current object.
+   */
+  T setDbLogDir(String dbLogDir);
+
+  /**
+   * Returns the directory of info log.
+   *
+   * If it is empty, the log files will be in the same dir as data.
+   * If it is non-empty, the log files will be in the specified dir,
+   * and the db data dir's absolute path will be used as the log file
+   * name's prefix.
+   *
+   * @return the path to the info log directory
+   */
+  String dbLogDir();
+
+  /**
+   * This specifies the absolute dir path for write-ahead logs (WAL).
+   * If it is empty, the log files will be in the same dir as data,
+   * dbname is used as the data dir by default.
+   * If it is non-empty, the log files will be kept in the specified dir.
+   * When destroying the db,
+   * all log files in wal_dir and the dir itself are deleted.
+   *
+   * @param walDir the path to the write-ahead-log directory.
+   * @return the instance of the current object.
+   */
+  T setWalDir(String walDir);
+
+  /**
+   * Returns the path to the write-ahead-logs (WAL) directory.
+   *
+   * If it is empty, the log files will be in the same dir as data,
+   * dbname is used as the data dir by default.
+   * If it is non-empty, the log files will be kept in the specified dir.
+   * When destroying the db,
+   * all log files in wal_dir and the dir itself are deleted.
+   *
+   * @return the path to the write-ahead-logs (WAL) directory.
+   */
+  String walDir();
+
+  /**
+   * The periodicity when obsolete files get deleted. The default
+   * value is 6 hours. The files that get out of scope by the compaction
+   * process will still get automatically deleted on every compaction,
+   * regardless of this setting.
+   *
+   * @param micros the time interval in micros
+   * @return the instance of the current object.
+   */
+  T setDeleteObsoleteFilesPeriodMicros(long micros);
+
+  /**
+   * The periodicity when obsolete files get deleted. The default
+   * value is 6 hours. The files that get out of scope by the compaction
+   * process will still get automatically deleted on every compaction,
+   * regardless of this setting.
+   *
+   * @return the time interval in micros when obsolete files will be deleted.
+   */
+  long deleteObsoleteFilesPeriodMicros();
+
+  /**
+   * This value represents the maximum number of threads that will
+   * concurrently perform a compaction job by breaking it into multiple,
+   * smaller ones that are run simultaneously.
+   * Default: 1 (i.e. no subcompactions)
+   *
+   * @param maxSubcompactions The maximum number of threads that will
+   *     concurrently perform a compaction job
+   *
+   * @return the instance of the current object.
+   */
+  T setMaxSubcompactions(int maxSubcompactions);
+
+  /**
+   * This value represents the maximum number of threads that will
+   * concurrently perform a compaction job by breaking it into multiple,
+   * smaller ones that are run simultaneously.
+   * Default: 1 (i.e. no subcompactions)
+   *
+   * @return The maximum number of threads that will concurrently perform a
+   *     compaction job
+   */
+  int maxSubcompactions();
+
+  /**
+   * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+   * value of max_background_jobs. For backwards compatibility we will set
+   * {@code max_background_jobs = max_background_compactions + max_background_flushes}
+   * in the case where user sets at least one of {@code max_background_compactions}
+   * or {@code max_background_flushes}.
+   *
+   * Specifies the maximum number of concurrent background flush jobs.
+   * If you're increasing this, also consider increasing number of threads in
+   * HIGH priority thread pool. For more information, see
+   * Default: -1
+   *
+   * @param maxBackgroundFlushes number of max concurrent flush jobs
+   * @return the instance of the current object.
+
+  /**
+   * The periodicity when obsolete files get deleted. The default
+   * value is 6 hours. The files that get out of scope by the compaction
+   * process will still get automatically deleted on every compaction,
+   * regardless of this setting.
+   *
+   * @param micros the time interval in micros
+   * @return the instance of the current object.
+   */
+  T setDeleteObsoleteFilesPeriodMicros(long micros);
+
+  /**
+   * The periodicity when obsolete files get deleted. The default
+   * value is 6 hours. The files that get out of scope by the compaction
+   * process will still get automatically deleted on every compaction,
+   * regardless of this setting.
+   *
+   * @return the time interval in micros when obsolete files will be deleted.
+   */
+  long deleteObsoleteFilesPeriodMicros();
+
+  /**
+   * This value represents the maximum number of threads that will
+   * concurrently perform a compaction job by breaking it into multiple,
+   * smaller ones that are run simultaneously.
+   * Default: 1 (i.e. no subcompactions)
+   *
+   * @param maxSubcompactions The maximum number of threads that will
+   *     concurrently perform a compaction job
+   *
+   * @return the instance of the current object.
+   */
+  T setMaxSubcompactions(int maxSubcompactions);
+
+  /**
+   * This value represents the maximum number of threads that will
+   * concurrently perform a compaction job by breaking it into multiple,
+   * smaller ones that are run simultaneously.
+   * Default: 1 (i.e. no subcompactions)
+   *
+   * @return The maximum number of threads that will concurrently perform a
+   *     compaction job
+   */
+  int maxSubcompactions();
+
+  /**
+   * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+   * value of max_background_jobs. For backwards compatibility we will set
+   * `max_background_jobs = max_background_compactions + max_background_flushes`
+   * in the case where the user sets at least one of `max_background_compactions`
+   * or `max_background_flushes`.
+   *
+   * Specifies the maximum number of concurrent background flush jobs.
+   * If you're increasing this, also consider increasing the number of threads
+   * in the HIGH priority thread pool. For more information, see
+   * {@link RocksEnv#setBackgroundThreads(int, Priority)}.
+   * Default: -1
+   *
+   * @param maxBackgroundFlushes number of max concurrent flush jobs
+   * @return the instance of the current object.
+   *
+   * @see RocksEnv#setBackgroundThreads(int)
+   * @see RocksEnv#setBackgroundThreads(int, Priority)
+   * @see MutableDBOptionsInterface#maxBackgroundCompactions()
+   *
+   * @deprecated Use {@link MutableDBOptionsInterface#setMaxBackgroundJobs(int)}
+   */
+  @Deprecated
+  T setMaxBackgroundFlushes(int maxBackgroundFlushes);
+
+  /**
+   * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the
+   * value of max_background_jobs. For backwards compatibility we will set
+   * `max_background_jobs = max_background_compactions + max_background_flushes`
+   * in the case where the user sets at least one of `max_background_compactions`
+   * or `max_background_flushes`.
+   *
+   * Returns the maximum number of concurrent background flush jobs.
+   * If you're increasing this, also consider increasing the number of threads
+   * in the HIGH priority thread pool. For more information, see
+   * {@link RocksEnv#setBackgroundThreads(int, Priority)}.
+   * Default: -1
+   *
+   * @return the maximum number of concurrent background flush jobs.
+   * @see RocksEnv#setBackgroundThreads(int)
+   * @see RocksEnv#setBackgroundThreads(int, Priority)
+   */
+  @Deprecated
+  int maxBackgroundFlushes();
+
+  /**
+   * Specifies the maximum size of an info log file. If the current log file
+   * is larger than `max_log_file_size`, a new info log file will
+   * be created.
+   * If 0, all logs will be written to one log file.
+   *
+   * @param maxLogFileSize the maximum size of an info log file.
+   * @return the instance of the current object.
+   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
+   *     when the value overflows the underlying platform-specific type.
+   */
+  T setMaxLogFileSize(long maxLogFileSize);
+
+  /**
+   * Returns the maximum size of an info log file. If the current log file
+   * is larger than this size, a new info log file will be created.
+   * If 0, all logs will be written to one log file.
+   *
+   * @return the maximum size of the info log file.
+   */
+  long maxLogFileSize();
+
+  /**
+   * Specifies the time interval for the info log file to roll (in seconds).
+   * If specified with a non-zero value, the log file will be rolled
+   * if it has been active longer than `log_file_time_to_roll`.
+   * Default: 0 (disabled)
+   *
+   * @param logFileTimeToRoll the time interval in seconds.
+   * @return the instance of the current object.
+   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
+   *     when the value overflows the underlying platform-specific type.
+   */
+  T setLogFileTimeToRoll(long logFileTimeToRoll);
+
+  /**
+   * Returns the time interval for the info log file to roll (in seconds).
+   * If specified with a non-zero value, the log file will be rolled
+   * if it has been active longer than `log_file_time_to_roll`.
+   * Default: 0 (disabled)
+   *
+   * @return the time interval in seconds.
+   */
+  long logFileTimeToRoll();
+
+  /**
+   * Specifies the maximum number of info log files to be kept.
+   * Default: 1000
+   *
+   * @param keepLogFileNum the maximum number of info log files to be kept.
+   * @return the instance of the current object.
+   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
+   *     when the value overflows the underlying platform-specific type.
+   */
+  T setKeepLogFileNum(long keepLogFileNum);
+
+  /**
+   * Returns the maximum number of info log files to be kept.
+   * Default: 1000
+   *
+   * @return the maximum number of info log files to be kept.
+   */
+  long keepLogFileNum();
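+
+  // A minimal sketch of info-log rotation, assuming a DBOptions instance;
+  // the limits are hypothetical:
+  //
+  //   try (final DBOptions options = new DBOptions()) {
+  //     options.setMaxLogFileSize(64L * 1024 * 1024); // roll at ~64 MB
+  //     options.setLogFileTimeToRoll(24L * 60 * 60);  // or once per day
+  //     options.setKeepLogFileNum(10);                // retain 10 old logs
+  //   }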
+
+  /**
+   * Recycle log files.
+   *
+   * If non-zero, we will reuse previously written log files for new
+   * logs, overwriting the old data. The value indicates how many
+   * such files we will keep around at any point in time for later
+   * use.
+   *
+   * This is more efficient because the blocks are already
+   * allocated and fdatasync does not need to update the inode after
+   * each write.
+   *
+   * Default: 0
+   *
+   * @param recycleLogFileNum the number of log files to keep for recycling
+   *
+   * @return the reference to the current options
+   */
+  T setRecycleLogFileNum(long recycleLogFileNum);
+
+  /**
+   * Recycle log files.
+   *
+   * If non-zero, we will reuse previously written log files for new
+   * logs, overwriting the old data. The value indicates how many
+   * such files we will keep around at any point in time for later
+   * use.
+   *
+   * This is more efficient because the blocks are already
+   * allocated and fdatasync does not need to update the inode after
+   * each write.
+   *
+   * Default: 0
+   *
+   * @return the number of log files kept for recycling
+   */
+  long recycleLogFileNum();
+
+  /**
+   * The manifest file is rolled over on reaching this limit.
+   * The older manifest file will be deleted.
+   * The default value is 1GB so that the manifest file can grow, but not
+   * reach the limit of storage capacity.
+   *
+   * @param maxManifestFileSize the size limit of a manifest file.
+   * @return the instance of the current object.
+   */
+  T setMaxManifestFileSize(long maxManifestFileSize);
+
+  /**
+   * The manifest file is rolled over on reaching this limit.
+   * The older manifest file will be deleted.
+   * The default value is 1GB so that the manifest file can grow, but not
+   * reach the limit of storage capacity.
+   *
+   * @return the size limit of a manifest file.
+   */
+  long maxManifestFileSize();
+
+  /**
+   * Number of shards used for the table cache.
+   *
+   * @param tableCacheNumshardbits the number of shards
+   * @return the instance of the current object.
+   */
+  T setTableCacheNumshardbits(int tableCacheNumshardbits);
+
+  /**
+   * Number of shards used for the table cache.
+   *
+   * @return the number of shards used for the table cache.
+   */
+  int tableCacheNumshardbits();
+
+  /**
+   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived
+   * logs will be deleted.
+   * <ol>
+   * <li>If both set to 0, logs will be deleted asap and will not get into
+   * the archive.</li>
+   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+   * WAL files will be checked every 10 min and if total size is greater
+   * than WAL_size_limit_MB, they will be deleted starting with the
+   * earliest until size_limit is met. All empty files will be deleted.</li>
+   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   * WAL files will be checked every WAL_ttl_seconds / 2 and those that
+   * are older than WAL_ttl_seconds will be deleted.</li>
+   * <li>If both are not 0, WAL files will be checked every 10 min and both
+   * checks will be performed with ttl being first.</li>
+   * </ol>
+   *
+   * @param walTtlSeconds the ttl seconds
+   * @return the instance of the current object.
+   * @see #setWalSizeLimitMB(long)
+   */
+  T setWalTtlSeconds(long walTtlSeconds);
+
+  /**
+   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived
+   * logs will be deleted.
+   * <ol>
+   * <li>If both set to 0, logs will be deleted asap and will not get into
+   * the archive.</li>
+   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+   * WAL files will be checked every 10 min and if total size is greater
+   * than WAL_size_limit_MB, they will be deleted starting with the
+   * earliest until size_limit is met. All empty files will be deleted.</li>
+   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   * WAL files will be checked every WAL_ttl_seconds / 2 and those that
+   * are older than WAL_ttl_seconds will be deleted.</li>
+   * <li>If both are not 0, WAL files will be checked every 10 min and both
+   * checks will be performed with ttl being first.</li>
+   * </ol>
+   *
+   * @return the wal-ttl seconds
+   * @see #walSizeLimitMB()
+   */
+  long walTtlSeconds();
+
+  /**
+   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived
+   * logs will be deleted.
+   * <ol>
+   * <li>If both set to 0, logs will be deleted asap and will not get into
+   * the archive.</li>
+   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+   * WAL files will be checked every 10 min and if total size is greater
+   * than WAL_size_limit_MB, they will be deleted starting with the
+   * earliest until size_limit is met. All empty files will be deleted.</li>
+   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   * WAL files will be checked every WAL_ttl_seconds / 2 and those that
+   * are older than WAL_ttl_seconds will be deleted.</li>
+   * <li>If both are not 0, WAL files will be checked every 10 min and both
+   * checks will be performed with ttl being first.</li>
+   * </ol>
+   *
+   * @param sizeLimitMB size limit in megabytes.
+   * @return the instance of the current object.
+   * @see #setWalTtlSeconds(long)
+   */
+  T setWalSizeLimitMB(long sizeLimitMB);
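+
+  // A minimal sketch of bounding the WAL archive, assuming a DBOptions
+  // instance; the limits are hypothetical:
+  //
+  //   try (final DBOptions options = new DBOptions()) {
+  //     options.setWalTtlSeconds(60 * 60); // prune archived WALs after ~1h
+  //     options.setWalSizeLimitMB(1024);   // or once they exceed ~1GB
+  //   }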
+
+  /**
+   * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived
+   * logs will be deleted.
+   * <ol>
+   * <li>If both set to 0, logs will be deleted asap and will not get into
+   * the archive.</li>
+   * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+   * WAL files will be checked every 10 min and if total size is greater
+   * than WAL_size_limit_MB, they will be deleted starting with the
+   * earliest until size_limit is met. All empty files will be deleted.</li>
+   * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+   * WAL files will be checked every WAL_ttl_seconds / 2 and those that
+   * are older than WAL_ttl_seconds will be deleted.</li>
+   * <li>If both are not 0, WAL files will be checked every 10 min and both
+   * checks will be performed with ttl being first.</li>
+   * </ol>
+   *
+   * @return size limit in megabytes.
+   * @see #walTtlSeconds()
+   */
+  long walSizeLimitMB();
+
+  /**
+   * The maximum limit of the number of bytes that are written in a single
+   * batch of WAL or memtable writes. It is followed when the leader write
+   * size is larger than 1/8 of this limit.
+   *
+   * Default: 1 MB
+   *
+   * @param maxWriteBatchGroupSizeBytes the maximum limit of the number of
+   *     bytes, see description.
+   * @return the instance of the current object.
+   */
+  T setMaxWriteBatchGroupSizeBytes(final long maxWriteBatchGroupSizeBytes);
+
+  /**
+   * The maximum limit of the number of bytes that are written in a single
+   * batch of WAL or memtable writes. It is followed when the leader write
+   * size is larger than 1/8 of this limit.
+   *
+   * Default: 1 MB
+   *
+   * @return the maximum limit of the number of bytes, see description.
+   */
+  long maxWriteBatchGroupSizeBytes();
+
+  /**
+   * Number of bytes to preallocate (via fallocate) the manifest files.
+   * The default is 4 MB, which is reasonable to reduce random IO
+   * as well as prevent overallocation for mounts that preallocate
+   * large amounts of data (such as xfs's allocsize option).
+   *
+   * @param size the size in bytes
+   * @return the instance of the current object.
+   * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms
+   *     when the value overflows the underlying platform-specific type.
+   */
+  T setManifestPreallocationSize(long size);
+
+  /**
+   * Number of bytes to preallocate (via fallocate) the manifest files.
+   * The default is 4 MB, which is reasonable to reduce random IO
+   * as well as prevent overallocation for mounts that preallocate
+   * large amounts of data (such as xfs's allocsize option).
+   *
+   * @return size in bytes.
+   */
+  long manifestPreallocationSize();
+
+  /**
+   * Enable the OS to use direct I/O for reading sst tables.
+   * Default: false
+   *
+   * @param useDirectReads if true, then direct reads are enabled
+   * @return the instance of the current object.
+   */
+  T setUseDirectReads(boolean useDirectReads);
+
+  /**
+   * Enable the OS to use direct I/O for reading sst tables.
+   * Default: false
+   *
+   * @return if true, then direct reads are enabled
+   */
+  boolean useDirectReads();
+
+  /**
+   * Enable the OS to use direct reads and writes in flush and
+   * compaction.
+   * Default: false
+   *
+   * @param useDirectIoForFlushAndCompaction if true, then direct
+   *     I/O will be enabled for background flushes and compactions
+   * @return the instance of the current object.
+   */
+  T setUseDirectIoForFlushAndCompaction(boolean useDirectIoForFlushAndCompaction);
+
+  /**
+   * Enable the OS to use direct reads and writes in flush and
+   * compaction.
+   *
+   * @return if true, then direct I/O is enabled for flush and
+   *     compaction
+   */
+  boolean useDirectIoForFlushAndCompaction();
+
+  /**
+   * Whether fallocate calls are allowed.
+   *
+   * @param allowFAllocate false if fallocate() calls are bypassed
+   *
+   * @return the reference to the current options.
+   */
+  T setAllowFAllocate(boolean allowFAllocate);
+
+  /**
+   * Whether fallocate calls are allowed.
+   *
+   * @return false if fallocate() calls are bypassed
+   */
+  boolean allowFAllocate();
+
+  /**
+   * Allow the OS to mmap a file for reading sst tables.
+   * Default: false
+   *
+   * @param allowMmapReads true if mmap reads are allowed.
+   * @return the instance of the current object.
+   */
+  T setAllowMmapReads(boolean allowMmapReads);
+
+  /**
+   * Allow the OS to mmap a file for reading sst tables.
+   * Default: false
+   *
+   * @return true if mmap reads are allowed.
+   */
+  boolean allowMmapReads();
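+
+  // A minimal sketch of enabling direct I/O, assuming a DBOptions
+  // instance (this bypasses the OS page cache; measure before adopting):
+  //
+  //   try (final DBOptions options = new DBOptions()) {
+  //     options.setUseDirectReads(true);
+  //     options.setUseDirectIoForFlushAndCompaction(true);
+  //   }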
+
+  /**
+   * Allow the OS to mmap a file for writing. Default: false
+   *
+   * @param allowMmapWrites true if mmap writes are allowed.
+   * @return the instance of the current object.
+   */
+  T setAllowMmapWrites(boolean allowMmapWrites);
+
+  /**
+   * Allow the OS to mmap a file for writing. Default: false
+   *
+   * @return true if mmap writes are allowed.
+   */
+  boolean allowMmapWrites();
+
+  /**
+   * Disable child processes inheriting open files. Default: true
+   *
+   * @param isFdCloseOnExec true if child processes are prevented from
+   *     inheriting open files.
+   * @return the instance of the current object.
+   */
+  T setIsFdCloseOnExec(boolean isFdCloseOnExec);
+
+  /**
+   * Disable child processes inheriting open files. Default: true
+   *
+   * @return true if child processes are prevented from inheriting open files.
+   */
+  boolean isFdCloseOnExec();
+
+  /**
+   * If set to true, will hint the underlying file system that the file
+   * access pattern is random when an sst file is opened.
+   * Default: true
+   *
+   * @param adviseRandomOnOpen true if hinting random access is on.
+   * @return the instance of the current object.
+   */
+  T setAdviseRandomOnOpen(boolean adviseRandomOnOpen);
+
+  /**
+   * If set to true, will hint the underlying file system that the file
+   * access pattern is random when an sst file is opened.
+   * Default: true
+   *
+   * @return true if hinting random access is on.
+   */
+  boolean adviseRandomOnOpen();
+
+  /**
+   * Amount of data to build up in memtables across all column
+   * families before writing to disk.
+   *
+   * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
+   * which enforces a limit for a single memtable.
+   *
+   * This feature is disabled by default. Specify a non-zero value
+   * to enable it.
+   *
+   * Default: 0 (disabled)
+   *
+   * @param dbWriteBufferSize the size of the write buffer
+   *
+   * @return the reference to the current options.
+   */
+  T setDbWriteBufferSize(long dbWriteBufferSize);
+
+  /**
+   * Use the passed {@link WriteBufferManager} to control memory usage across
+   * multiple column families and/or DB instances.
+   *
+   * Check
+   * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager
+   * for more details on when to use it.
+   *
+   * @param writeBufferManager The WriteBufferManager to use
+   * @return the reference to the current options.
+   */
+  T setWriteBufferManager(final WriteBufferManager writeBufferManager);
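+
+  // A minimal sketch of sharing a write-buffer budget, assuming the
+  // WriteBufferManager(long, Cache) constructor; the sizes are
+  // hypothetical:
+  //
+  //   try (final Cache cache = new LRUCache(256L << 20);
+  //        final WriteBufferManager wbm =
+  //            new WriteBufferManager(128L << 20, cache);
+  //        final DBOptions options = new DBOptions()) {
+  //     options.setWriteBufferManager(wbm);
+  //   }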
+
+  /**
+   * Reference to the {@link WriteBufferManager} used by this options
+   * instance.
+   *
+   * Default: null (disabled)
+   *
+   * @return a reference to the WriteBufferManager
+   */
+  WriteBufferManager writeBufferManager();
+
+  /**
+   * Amount of data to build up in memtables across all column
+   * families before writing to disk.
+   *
+   * This is distinct from {@link ColumnFamilyOptions#writeBufferSize()},
+   * which enforces a limit for a single memtable.
+   *
+   * This feature is disabled by default. Specify a non-zero value
+   * to enable it.
+   *
+   * Default: 0 (disabled)
+   *
+   * @return the size of the write buffer
+   */
+  long dbWriteBufferSize();
+
+  /**
+   * Specify the file access pattern once a compaction is started.
+   * It will be applied to all input files of a compaction.
+   *
+   * Default: {@link AccessHint#NORMAL}
+   *
+   * @param accessHint The access hint
+   *
+   * @return the reference to the current options.
+   */
+  T setAccessHintOnCompactionStart(final AccessHint accessHint);
+
+  /**
+   * Specify the file access pattern once a compaction is started.
+   * It will be applied to all input files of a compaction.
+   *
+   * Default: {@link AccessHint#NORMAL}
+   *
+   * @return The access hint
+   */
+  AccessHint accessHintOnCompactionStart();
+
+  /**
+   * This is the maximum buffer size that is used by WinMmapReadableFile in
+   * unbuffered disk I/O mode. We need to maintain an aligned buffer for
+   * reads. We allow the buffer to grow until the specified value and then
+   * for bigger requests allocate one-shot buffers. In unbuffered mode we
+   * always bypass the read-ahead buffer at ReadaheadRandomAccessFile.
+   * When read-ahead is required we then make use of the
+   * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
+   * always try to read ahead.
+   * With read-ahead we always pre-allocate the buffer to the size instead of
+   * growing it up to a limit.
+   *
+   * This option is currently honored only on Windows.
+   *
+   * Default: 1 MB
+   *
+   * Special value: 0 - means do not maintain a per-instance buffer. Allocate
+   * a per-request buffer and avoid locking.
+   *
+   * @param randomAccessMaxBufferSize the maximum size of the random access
+   *     buffer
+   *
+   * @return the reference to the current options.
+   */
+  T setRandomAccessMaxBufferSize(long randomAccessMaxBufferSize);
+
+  /**
+   * This is the maximum buffer size that is used by WinMmapReadableFile in
+   * unbuffered disk I/O mode. We need to maintain an aligned buffer for
+   * reads. We allow the buffer to grow until the specified value and then
+   * for bigger requests allocate one-shot buffers. In unbuffered mode we
+   * always bypass the read-ahead buffer at ReadaheadRandomAccessFile.
+   * When read-ahead is required we then make use of the
+   * {@link MutableDBOptionsInterface#compactionReadaheadSize()} value and
+   * always try to read ahead. With read-ahead we always pre-allocate the
+   * buffer to the size instead of growing it up to a limit.
+   *
+   * This option is currently honored only on Windows.
+   *
+   * Default: 1 MB
+   *
+   * Special value: 0 - means do not maintain a per-instance buffer. Allocate
+   * a per-request buffer and avoid locking.
+   *
+   * @return the maximum size of the random access buffer
+   */
+  long randomAccessMaxBufferSize();
+
+  /**
+   * Use adaptive mutex, which spins in user space before resorting
+   * to the kernel. This could reduce context switches when the mutex is not
+   * heavily contended. However, if the mutex is hot, we could end up
+   * wasting spin time.
+   * Default: false
+   *
+   * @param useAdaptiveMutex true if adaptive mutex is used.
+   * @return the instance of the current object.
+   */
+  T setUseAdaptiveMutex(boolean useAdaptiveMutex);
+
+  /**
+   * Use adaptive mutex, which spins in user space before resorting
+   * to the kernel. This could reduce context switches when the mutex is not
+   * heavily contended. However, if the mutex is hot, we could end up
+   * wasting spin time.
+   * Default: false
+   *
+   * @return true if adaptive mutex is used.
+   */
+  boolean useAdaptiveMutex();
+
+  /**
+   * Sets the {@link EventListener}s whose callback functions
+   * will be called when a specific RocksDB event happens.
+   *
+   * Note: the RocksJava API currently only supports EventListeners
+   * implemented in Java. It could be extended in the future to also support
+   * adding/removing EventListeners implemented in C++.
+   *
+   * @param listeners the listeners who should be notified on various events.
+   *
+   * @return the instance of the current object.
+   */
+  T setListeners(final List<AbstractEventListener> listeners);
+
+  /**
+   * Gets the {@link EventListener}s whose callback functions
+   * will be called when a specific RocksDB event happens.
+   *
+   * Note: the RocksJava API currently only supports EventListeners
+   * implemented in Java. It could be extended in the future to also support
+   * adding/removing EventListeners implemented in C++.
+   *
+   * @return the listeners who should be notified on various events.
+   */
+  List<AbstractEventListener> listeners();
+
+  /**
+   * If true, then the status of the threads involved in this DB will
+   * be tracked and available via the GetThreadList() API.
+   *
+   * Default: false
+   *
+   * @param enableThreadTracking true to enable tracking
+   *
+   * @return the reference to the current options.
+   */
+  T setEnableThreadTracking(boolean enableThreadTracking);
+
+  /**
+   * If true, then the status of the threads involved in this DB will
+   * be tracked and available via the GetThreadList() API.
+   *
+   * Default: false
+   *
+   * @return true if tracking is enabled
+   */
+  boolean enableThreadTracking();
+
+  /**
+   * By default, a single write thread queue is maintained. The thread that
+   * gets to the head of the queue becomes the write batch group leader and
+   * is responsible for writing to the WAL and memtable for the batch group.
+   *
+   * If {@link #enablePipelinedWrite()} is true, a separate write thread queue
+   * is maintained for the WAL write and the memtable write. A write thread
+   * first enters the WAL writer queue and then the memtable writer queue.
+   * A pending thread on the WAL writer queue thus only has to wait for
+   * previous writers to finish their WAL writing but not the memtable
+   * writing. Enabling the feature may improve write throughput and reduce
+   * latency of the prepare phase of two-phase commit.
+   *
+   * Default: false
+   *
+   * @param enablePipelinedWrite true to enable pipelined writes
+   *
+   * @return the reference to the current options.
+   */
+  T setEnablePipelinedWrite(final boolean enablePipelinedWrite);
+
+  /**
+   * Returns true if pipelined writes are enabled.
+   * See {@link #setEnablePipelinedWrite(boolean)}.
+   *
+   * @return true if pipelined writes are enabled, false otherwise.
+   */
+  boolean enablePipelinedWrite();
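+
+  // A minimal sketch of registering a listener, assuming a DBOptions
+  // instance and a Java AbstractEventListener; the onFlushCompleted
+  // override shown here is an assumed example hook:
+  //
+  //   final AbstractEventListener listener = new AbstractEventListener() {
+  //     @Override
+  //     public void onFlushCompleted(final RocksDB db,
+  //         final FlushJobInfo flushJobInfo) {
+  //       // react to the completed flush here
+  //     }
+  //   };
+  //   options.setListeners(java.util.Collections.singletonList(listener));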
+
+  /**
+   * Setting {@link #unorderedWrite()} to true trades higher write throughput
+   * for relaxing the immutability guarantee of snapshots. This violates the
+   * repeatability one expects from ::Get from a snapshot, as well as
+   * ::MultiGet and Iterator's consistent-point-in-time view property.
+   * If the application cannot tolerate the relaxed guarantees, it can
+   * implement its own mechanisms to work around that and yet benefit from the
+   * higher throughput. Using TransactionDB with WRITE_PREPARED write policy
+   * and {@link #twoWriteQueues()} true is one way to achieve immutable
+   * snapshots despite unordered_write.
+   *
+   * By default, i.e., when it is false, rocksdb does not advance the sequence
+   * number for new snapshots unless all the writes with lower sequence numbers
+   * are already finished. This provides the immutability that we expect from
+   * snapshots. Moreover, since Iterator and MultiGet internally depend on
+   * snapshots, the snapshot immutability results in Iterator and MultiGet
+   * offering a consistent-point-in-time view. If set to true, although
+   * the Read-Your-Own-Write property is still provided, the snapshot
+   * immutability property is relaxed: the writes issued after the snapshot is
+   * obtained (with larger sequence numbers) will still not be visible to the
+   * reads from that snapshot, however, there still might be pending writes
+   * (with lower sequence numbers) that will change the state visible to the
+   * snapshot after they land in the memtable.
+   *
+   * @param unorderedWrite true to enable unordered writes
+   *
+   * @return the reference to the current options.
+   */
+  T setUnorderedWrite(final boolean unorderedWrite);
+
+  /**
+   * Returns true if unordered writes are enabled.
+   * See {@link #setUnorderedWrite(boolean)}.
+   *
+   * @return true if unordered writes are enabled, false otherwise.
+   */
+  boolean unorderedWrite();
+
+  /**
+   * If true, allow multi-writers to update mem tables in parallel.
+   * Only some memtable factories support concurrent writes; currently it
+   * is implemented only for SkipListFactory. Concurrent memtable writes
+   * are not compatible with inplace_update_support or filter_deletes.
+   * It is strongly recommended to set
+   * {@link #setEnableWriteThreadAdaptiveYield(boolean)} if you are going to
+   * use this feature.
+   * Default: true
+   *
+   * @param allowConcurrentMemtableWrite true to enable concurrent writes
+   *     for the memtable
+   *
+   * @return the reference to the current options.
+   */
+  T setAllowConcurrentMemtableWrite(boolean allowConcurrentMemtableWrite);
+
+  /**
+   * If true, allow multi-writers to update mem tables in parallel.
+   * Only some memtable factories support concurrent writes; currently it
+   * is implemented only for SkipListFactory. Concurrent memtable writes
+   * are not compatible with inplace_update_support or filter_deletes.
+   * It is strongly recommended to set
+   * {@link #setEnableWriteThreadAdaptiveYield(boolean)} if you are going to
+   * use this feature.
+   * Default: true
+   *
+   * @return true if concurrent writes are enabled for the memtable
+   */
+  boolean allowConcurrentMemtableWrite();
+
+  /**
+   * If true, threads synchronizing with the write batch group leader will
+   * wait for up to {@link #writeThreadMaxYieldUsec()} before blocking on a
+   * mutex. This can substantially improve throughput for concurrent workloads,
+   * regardless of whether {@link #allowConcurrentMemtableWrite()} is enabled.
+   * Default: true
+   *
+   * @param enableWriteThreadAdaptiveYield true to enable adaptive yield for
+   *     the write threads
+   *
+   * @return the reference to the current options.
+   */
+  T setEnableWriteThreadAdaptiveYield(
+      boolean enableWriteThreadAdaptiveYield);
+
+  /**
+   * If true, threads synchronizing with the write batch group leader will
+   * wait for up to {@link #writeThreadMaxYieldUsec()} before blocking on a
+   * mutex. This can substantially improve throughput for concurrent workloads,
+   * regardless of whether {@link #allowConcurrentMemtableWrite()} is enabled.
+   * Default: true
+   *
+   * @return true if adaptive yield is enabled
+   *     for the writing threads
+   */
+  boolean enableWriteThreadAdaptiveYield();
+
+  /**
+   * The maximum number of microseconds that a write operation will use
+   * a yielding spin loop to coordinate with other write threads before
+   * blocking on a mutex. (Assuming {@link #writeThreadSlowYieldUsec()} is
+   * set properly) increasing this value is likely to increase RocksDB
+   * throughput at the expense of increased CPU usage.
+   * Default: 100
+   *
+   * @param writeThreadMaxYieldUsec maximum number of microseconds
+   *
+   * @return the reference to the current options.
+   */
+  T setWriteThreadMaxYieldUsec(long writeThreadMaxYieldUsec);
+
+  /**
+   * The maximum number of microseconds that a write operation will use
+   * a yielding spin loop to coordinate with other write threads before
+   * blocking on a mutex. (Assuming {@link #writeThreadSlowYieldUsec()} is
+   * set properly) increasing this value is likely to increase RocksDB
+   * throughput at the expense of increased CPU usage.
+   * Default: 100
+   *
+   * @return the maximum number of microseconds
+   */
+  long writeThreadMaxYieldUsec();
+
+  /**
+   * The latency in microseconds after which a std::this_thread::yield
+   * call (sched_yield on Linux) is considered to be a signal that
+   * other processes or threads would like to use the current core.
+   * Increasing this makes writer threads more likely to take CPU
+   * by spinning, which will show up as an increase in the number of
+   * involuntary context switches.
+   * Default: 3
+   *
+   * @param writeThreadSlowYieldUsec the latency in microseconds
+   *
+   * @return the reference to the current options.
+   */
+  T setWriteThreadSlowYieldUsec(long writeThreadSlowYieldUsec);
+
+  /**
+   * The latency in microseconds after which a std::this_thread::yield
+   * call (sched_yield on Linux) is considered to be a signal that
+   * other processes or threads would like to use the current core.
+   * Increasing this makes writer threads more likely to take CPU
+   * by spinning, which will show up as an increase in the number of
+   * involuntary context switches.
+   * Default: 3
+   *
+   * @return the latency in microseconds
+   */
+  long writeThreadSlowYieldUsec();
+
+  /**
+   * If true, then DB::Open() will not update the statistics used to optimize
+   * compaction decisions by loading table properties from many files.
+   * Turning off this feature will improve DB open time, especially in a
+   * disk environment.
+   *
+   * Default: false
+   *
+   * @param skipStatsUpdateOnDbOpen true if updating stats will be skipped
+   *
+   * @return the reference to the current options.
+   */
+  T setSkipStatsUpdateOnDbOpen(boolean skipStatsUpdateOnDbOpen);
+
+  /**
+   * If true, then DB::Open() will not update the statistics used to optimize
+   * compaction decisions by loading table properties from many files.
+   * Turning off this feature will improve DB open time, especially in a
+   * disk environment.
+   *
+   * Default: false
+   *
+   * @return true if updating stats will be skipped
+   */
+  boolean skipStatsUpdateOnDbOpen();
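+
+  // A minimal sketch of the write-path tuning discussed above, assuming
+  // a DBOptions instance:
+  //
+  //   try (final DBOptions options = new DBOptions()) {
+  //     options.setEnablePipelinedWrite(true);
+  //     options.setAllowConcurrentMemtableWrite(true);
+  //     options.setEnableWriteThreadAdaptiveYield(true);
+  //   }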
+
+  /**
+   * If true, then {@link RocksDB#open(String)} will not fetch and check sizes
+   * of all sst files. This may significantly speed up startup if there are
+   * many sst files, especially when using a non-default Env with expensive
+   * GetFileSize(). We'll still check that all required sst files exist.
+   * If {@code paranoid_checks} is false, this option is ignored, and sst
+   * files are not checked at all.
+   *
+   * Default: false
+   *
+   * @param skipCheckingSstFileSizesOnDbOpen if true, then SST file sizes will
+   *     not be checked when calling {@link RocksDB#open(String)}.
+   * @return the reference to the current options.
+   */
+  T setSkipCheckingSstFileSizesOnDbOpen(final boolean skipCheckingSstFileSizesOnDbOpen);
+
+  /**
+   * If true, then {@link RocksDB#open(String)} will not fetch and check sizes
+   * of all sst files. This may significantly speed up startup if there are
+   * many sst files, especially when using a non-default Env with expensive
+   * GetFileSize(). We'll still check that all required sst files exist.
+   * If {@code paranoid_checks} is false, this option is ignored, and sst
+   * files are not checked at all.
+   *
+   * Default: false
+   *
+   * @return true, if file sizes will not be checked when calling
+   *     {@link RocksDB#open(String)}.
+   */
+  boolean skipCheckingSstFileSizesOnDbOpen();
+
+  /**
+   * Recovery mode to control the consistency while replaying the WAL.
+   *
+   * Default: {@link WALRecoveryMode#PointInTimeRecovery}
+   *
+   * @param walRecoveryMode The WAL recovery mode
+   *
+   * @return the reference to the current options.
+   */
+  T setWalRecoveryMode(WALRecoveryMode walRecoveryMode);
+
+  /**
+   * Recovery mode to control the consistency while replaying the WAL.
+   *
+   * Default: {@link WALRecoveryMode#PointInTimeRecovery}
+   *
+   * @return The WAL recovery mode
+   */
+  WALRecoveryMode walRecoveryMode();
+
+  /**
+   * If set to false, then recovery will fail when a prepared
+   * transaction is encountered in the WAL.
+   *
+   * Default: false
+   *
+   * @param allow2pc true if two-phase-commit is enabled
+   *
+   * @return the reference to the current options.
+   */
+  T setAllow2pc(boolean allow2pc);
+
+  /**
+   * If set to false, then recovery will fail when a prepared
+   * transaction is encountered in the WAL.
+   *
+   * Default: false
+   *
+   * @return true if two-phase-commit is enabled
+   */
+  boolean allow2pc();
+
+  /**
+   * A global cache for table-level rows.
+   *
+   * Default: null (disabled)
+   *
+   * @param rowCache The global row cache
+   *
+   * @return the reference to the current options.
+   */
+  T setRowCache(final Cache rowCache);
+
+  /**
+   * A global cache for table-level rows.
+   *
+   * Default: null (disabled)
+   *
+   * @return The global row cache
+   */
+  Cache rowCache();
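+
+  // A minimal sketch of enabling the row cache, assuming a DBOptions
+  // instance and an LRUCache; the capacity is hypothetical:
+  //
+  //   try (final Cache rowCache = new LRUCache(64L << 20);
+  //        final DBOptions options = new DBOptions()) {
+  //     options.setRowCache(rowCache);
+  //   }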
+
+  /**
+   * A filter object supplied to be invoked while processing write-ahead-logs
+   * (WALs) during recovery. The filter provides a way to inspect log
+   * records, ignoring a particular record or skipping replay.
+   * The filter is invoked at startup and is currently invoked from a
+   * single thread.
+   *
+   * @param walFilter the filter for processing WALs during recovery.
+   *
+   * @return the reference to the current options.
+   */
+  T setWalFilter(final AbstractWalFilter walFilter);
+
+  /**
+   * Gets the filter for processing WALs during recovery.
+   * See {@link #setWalFilter(AbstractWalFilter)}.
+   *
+   * @return the filter used for processing WALs during recovery.
+   */
+  WalFilter walFilter();
+
+  /**
+   * If true, then DB::Open / CreateColumnFamily / DropColumnFamily
+   * / SetOptions will fail if the options file is not detected or not
+   * properly persisted.
+   *
+   * DEFAULT: false
+   *
+   * @param failIfOptionsFileError true if we should fail if there is an error
+   *     in the options file
+   *
+   * @return the reference to the current options.
+   */
+  T setFailIfOptionsFileError(boolean failIfOptionsFileError);
+
+  /**
+   * If true, then DB::Open / CreateColumnFamily / DropColumnFamily
+   * / SetOptions will fail if the options file is not detected or not
+   * properly persisted.
+   *
+   * DEFAULT: false
+   *
+   * @return true if we should fail if there is an error in the options file
+   */
+  boolean failIfOptionsFileError();
+
+  /**
+   * If true, then print malloc stats together with rocksdb.stats
+   * when printing to LOG.
+   *
+   * DEFAULT: false
+   *
+   * @param dumpMallocStats true if malloc stats should be printed to LOG
+   *
+   * @return the reference to the current options.
+   */
+  T setDumpMallocStats(boolean dumpMallocStats);
+
+  /**
+   * If true, then print malloc stats together with rocksdb.stats
+   * when printing to LOG.
+   *
+   * DEFAULT: false
+   *
+   * @return true if malloc stats should be printed to LOG
+   */
+  boolean dumpMallocStats();
+
+  /**
+   * By default RocksDB replays WAL logs and flushes them on DB open, which
+   * may create very small SST files. If this option is enabled, RocksDB will
+   * try to avoid (but not guarantee not to) flush during recovery. Also,
+   * existing WAL logs will be kept, so that if a crash happened before the
+   * flush, we still have logs to recover from.
+   *
+   * DEFAULT: false
+   *
+   * @param avoidFlushDuringRecovery true to try to avoid (but not guarantee
+   *     not to) flush during recovery
+   *
+   * @return the reference to the current options.
+   */
+  T setAvoidFlushDuringRecovery(boolean avoidFlushDuringRecovery);
+
+  /**
+   * By default RocksDB replays WAL logs and flushes them on DB open, which
+   * may create very small SST files. If this option is enabled, RocksDB will
+   * try to avoid (but not guarantee not to) flush during recovery. Also,
+   * existing WAL logs will be kept, so that if a crash happened before the
+   * flush, we still have logs to recover from.
+   *
+   * DEFAULT: false
+   *
+   * @return true to try to avoid (but not guarantee not to) flush during
+   *     recovery
+   */
+  boolean avoidFlushDuringRecovery();
+
+  /**
+   * Set this option to true during creation of the database if you want
+   * to be able to ingest behind (call IngestExternalFile() skipping keys
+   * that already exist, rather than overwriting matching keys).
+   * Setting this option to true will affect two things:
+   * 1) Disable some internal optimizations around SST file compression.
+   * 2) Reserve the bottom-most level for ingested files only.
+   * Note that num_levels should be >= 3 if this option is turned on.
+   *
+   * DEFAULT: false
+   *
+   * @param allowIngestBehind true to allow ingest behind, false to disallow.
+   *
+   * @return the reference to the current options.
+   */
+  T setAllowIngestBehind(final boolean allowIngestBehind);
+
+  /**
+   * Returns true if ingest behind is allowed.
+   * See {@link #setAllowIngestBehind(boolean)}.
+   *
+   * @return true if ingest behind is allowed, false otherwise.
+   */
+  boolean allowIngestBehind();
+
+  /**
+   * If enabled it uses two queues for writes, one for the ones with
+   * disable_memtable and one for the ones that also write to memtable. This
+   * allows the memtable writes not to lag behind other writes. It can be used
+   * to optimize MySQL 2PC in which only the commits, which are serial, write
+   * to memtable.
+   *
+   * DEFAULT: false
+   *
+   * @param twoWriteQueues true to enable two write queues, false otherwise.
+   *
+   * @return the reference to the current options.
+   */
+  T setTwoWriteQueues(final boolean twoWriteQueues);
+
+  /**
+   * Returns true if two write queues are enabled.
+   *
+   * @return true if two write queues are enabled, false otherwise.
+   */
+  boolean twoWriteQueues();
+
+  /**
+   * If true, the WAL is not flushed automatically after each write. Instead
+   * it relies on manual invocation of FlushWAL to write the WAL buffer to
+   * its file.
+   *
+   * DEFAULT: false
+   *
+   * @param manualWalFlush true to disable automatic WAL flushing,
+   *     false otherwise.
+   *
+   * @return the reference to the current options.
+   */
+  T setManualWalFlush(final boolean manualWalFlush);
+
+  /**
+   * Returns true if automatic WAL flushing is disabled.
+   * See {@link #setManualWalFlush(boolean)}.
+   *
+   * @return true if automatic WAL flushing is disabled, false otherwise.
+   */
+  boolean manualWalFlush();
+
+  /**
+   * If true, RocksDB supports flushing multiple column families and
+   * committing their results atomically to MANIFEST. Note that it is not
+   * necessary to set atomic_flush to true if the WAL is always enabled, since
+   * the WAL allows the database to be restored to the last persistent state
+   * in the WAL. This option is useful when there are column families with
+   * writes NOT protected by the WAL.
+   * For manual flush, the application has to specify which column families
+   * to flush atomically in {@link RocksDB#flush(FlushOptions, List)}.
+   * For auto-triggered flush, RocksDB atomically flushes ALL column families.
+   *
+   * Currently, any WAL-enabled writes after atomic flush may be replayed
+   * independently if the process crashes later and tries to recover.
+   *
+   * @param atomicFlush true to enable atomic flush of multiple column
+   *     families.
+   *
+   * @return the reference to the current options.
+   */
+  T setAtomicFlush(final boolean atomicFlush);
+
+  /**
+   * Determine if atomic flush of multiple column families is enabled.
+   *
+   * See {@link #setAtomicFlush(boolean)}.
+   *
+   * @return true if atomic flush is enabled.
+   */
+  boolean atomicFlush();
+
+  /**
+   * If true, working threads may avoid doing unnecessary and long-latency
+   * operations (such as deleting obsolete files directly or deleting
+   * memtables) and will instead schedule a background job to do it.
+   * Use it if you're latency-sensitive.
+   * If set to true, takes precedence over
+   * {@link ReadOptions#setBackgroundPurgeOnIteratorCleanup(boolean)}.
+   *
+   * @param avoidUnnecessaryBlockingIO If true, working threads may avoid
+   *     doing unnecessary operations.
+   * @return the reference to the current options.
+   */
+  T setAvoidUnnecessaryBlockingIO(final boolean avoidUnnecessaryBlockingIO);
+
+  /**
+   * If true, working threads may avoid doing unnecessary and long-latency
+   * operations (such as deleting obsolete files directly or deleting
+   * memtables) and will instead schedule a background job to do it.
+   * Use it if you're latency-sensitive.
+   * If set to true, takes precedence over
+   * {@link ReadOptions#setBackgroundPurgeOnIteratorCleanup(boolean)}.
+   *
+   * @return true, if working threads may avoid doing unnecessary operations.
+   */
+  boolean avoidUnnecessaryBlockingIO();
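+
+  // A minimal sketch of manual WAL flushing; assumes an open RocksDB
+  // handle `db` and that RocksDB#flushWal(boolean) is available:
+  //
+  //   options.setManualWalFlush(true);
+  //   // ... perform a batch of writes ...
+  //   db.flushWal(true);  // sync the buffered WAL to its file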
+
+  /**
+   * If true, automatically persist stats to a hidden column family (column
+   * family name: ___rocksdb_stats_history___) every
+   * stats_persist_period_sec seconds; otherwise, write to an in-memory
+   * struct. The user can query through the `GetStatsHistory` API.
+   * If a user attempts to create a column family with the same name on a DB
+   * which has previously set persist_stats_to_disk to true, the column family
+   * creation will fail, but the hidden column family will survive, as well as
+   * the previously persisted statistics.
+   * When persisting stats to disk, the stat name will be limited to 100 bytes.
+   * Default: false
+   *
+   * @param persistStatsToDisk true if stats should be persisted to the hidden
+   *     column family.
+   * @return the instance of the current object.
+   */
+  T setPersistStatsToDisk(final boolean persistStatsToDisk);
+
+  /**
+   * If true, automatically persist stats to a hidden column family (column
+   * family name: ___rocksdb_stats_history___) every
+   * stats_persist_period_sec seconds; otherwise, write to an in-memory
+   * struct. The user can query through the `GetStatsHistory` API.
+   * If a user attempts to create a column family with the same name on a DB
+   * which has previously set persist_stats_to_disk to true, the column family
+   * creation will fail, but the hidden column family will survive, as well as
+   * the previously persisted statistics.
+   * When persisting stats to disk, the stat name will be limited to 100 bytes.
+   * Default: false
+   *
+   * @return true if stats should be persisted to the hidden column family.
+   */
+  boolean persistStatsToDisk();
+
+  /**
+   * Historically the DB ID has always been stored in the Identity file in
+   * the DB folder. If this flag is true, the DB ID is written to the Manifest
+   * file in addition to the Identity file. By doing this, two problems are
+   * solved:
+   * 1. The Identity file is not checksummed, whereas the Manifest file is.
+   * 2. Since the Manifest file is the source of truth for the DB, the DB ID
+   * will sit with the source of truth. Previously the Identity file could be
+   * copied independently of the Manifest, and that can result in a wrong
+   * DB ID.
+   * We recommend setting this flag to true.
+   * Default: false
+   *
+   * @param writeDbidToManifest if true, then the DB ID will be written to the
+   *     Manifest file.
+   * @return the instance of the current object.
+   */
+  T setWriteDbidToManifest(final boolean writeDbidToManifest);
+
+  /**
+   * Historically the DB ID has always been stored in the Identity file in
+   * the DB folder. If this flag is true, the DB ID is written to the Manifest
+   * file in addition to the Identity file. By doing this, two problems are
+   * solved:
+   * 1. The Identity file is not checksummed, whereas the Manifest file is.
+   * 2. Since the Manifest file is the source of truth for the DB, the DB ID
+   * will sit with the source of truth. Previously the Identity file could be
+   * copied independently of the Manifest, and that can result in a wrong
+   * DB ID.
+   * We recommend setting this flag to true.
+   * Default: false
+   *
+   * @return true, if the DB ID will be written to the Manifest file.
+   */
+  boolean writeDbidToManifest();
+
+  /**
+   * The number of bytes to prefetch when reading the log. This is mostly
+   * useful for reading a remotely located log, as it can save the number of
+   * round-trips. If 0, then the prefetching is disabled.
+   *
+   * Default: 0
+   *
+   * @param logReadaheadSize the number of bytes to prefetch when reading
+   *     the log.
+   * @return the instance of the current object.
+   */
+  T setLogReadaheadSize(final long logReadaheadSize);
+
+  /**
+   * The number of bytes to prefetch when reading the log. This is mostly
+   * useful for reading a remotely located log, as it can save the number of
+   * round-trips. If 0, then the prefetching is disabled.
+   *
+   * Default: 0
+   *
+   * @return the number of bytes to prefetch when reading the log.
+   */
+  long logReadaheadSize();
+
+  /**
+   * By default, RocksDB recovery fails if any table file referenced in the
+   * MANIFEST is missing after scanning the MANIFEST.
+   * Best-efforts recovery is another recovery mode that
+   * tries to restore the database to the most recent point in time without
+   * missing files.
+   * Currently not compatible with atomic flush. Furthermore, WAL files will
+   * not be used for recovery if best_efforts_recovery is true.
+   * Default: false
+   *
+   * @param bestEffortsRecovery if true, RocksDB will use best-efforts mode
+   *     when recovering.
+   * @return the instance of the current object.
+   */
+  T setBestEffortsRecovery(final boolean bestEffortsRecovery);
+
+  /**
+   * By default, RocksDB recovery fails if any table file referenced in the
+   * MANIFEST is missing after scanning the MANIFEST.
+   * Best-efforts recovery is another recovery mode that
+   * tries to restore the database to the most recent point in time without
+   * missing files.
+   * Currently not compatible with atomic flush. Furthermore, WAL files will
+   * not be used for recovery if best_efforts_recovery is true.
+   * Default: false
+   *
+   * @return true, if RocksDB uses best-efforts mode when recovering.
+   */
+  boolean bestEffortsRecovery();
+
+  /**
+   * It defines how many times db resume is called by a separate thread when
+   * a background retryable IO Error happens. When a background retryable IO
+   * Error happens, SetBGError is called to deal with the error. If the error
+   * can be auto-recovered (e.g., a retryable IO Error during Flush or WAL
+   * write), then db resume is called in the background to recover from the
+   * error. If this value is 0 or negative, db resume will not be called.
+   *
+   * Default: INT_MAX
+   *
+   * @param maxBgerrorResumeCount maximum number of times db resume should be
+   *     called when an IO Error happens.
+   * @return the instance of the current object.
+   */
+  T setMaxBgErrorResumeCount(final int maxBgerrorResumeCount);
+
+  /**
+   * It defines how many times db resume is called by a separate thread when
+   * a background retryable IO Error happens. When a background retryable IO
+   * Error happens, SetBGError is called to deal with the error. If the error
+   * can be auto-recovered (e.g., a retryable IO Error during Flush or WAL
+   * write), then db resume is called in the background to recover from the
+   * error. If this value is 0 or negative, db resume will not be called.
+   *
+   * Default: INT_MAX
+   *
+   * @return maximum number of times db resume should be called when an IO
+   *     Error happens.
+   */
+  int maxBgerrorResumeCount();
+
+  /**
+   * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times.
+   * This option decides how long to wait to retry the next resume if the
+   * previous resume fails and the redo-resume conditions are satisfied.
+   *
+   * Default: 1000000 (microseconds).
+   *
+   * @param bgerrorResumeRetryInterval how many microseconds to wait between
+   *     DB resume attempts.
+   * @return the instance of the current object.
+   */
+  T setBgerrorResumeRetryInterval(final long bgerrorResumeRetryInterval);
+
+  /**
+   * If max_bgerror_resume_count is ≥ 2, db resume is called multiple times.
+   * This option decides how long to wait to retry the next resume if the
+   * previous resume fails and the redo-resume conditions are satisfied.
+   *
+   * Default: 1000000 (microseconds).
+   *
+   * @return the number of microseconds to wait between DB resume attempts.
+   */
+  long bgerrorResumeRetryInterval();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java b/src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java
new file mode 100644
index 000000000..513e5b429
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DataBlockIndexType.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * DataBlockIndexType used in conjunction with BlockBasedTable.
+ */
+public enum DataBlockIndexType {
+  /**
+   * traditional block type
+   */
+  kDataBlockBinarySearch((byte)0x0),
+
+  /**
+   * additional hash index
+   */
+  kDataBlockBinaryAndHash((byte)0x1);
+
+  private final byte value;
+
+  DataBlockIndexType(final byte value) {
+    this.value = value;
+  }
+
+  byte getValue() {
+    return value;
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java b/src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java
new file mode 100644
index 000000000..3f0b67557
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DbPath.java
@@ -0,0 +1,47 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.file.Path;
+
+/**
+ * Tuple of database path and target size
+ */
+public class DbPath {
+  final Path path;
+  final long targetSize;
+
+  public DbPath(final Path path, final long targetSize) {
+    this.path = path;
+    this.targetSize = targetSize;
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    final DbPath dbPath = (DbPath) o;
+
+    if (targetSize != dbPath.targetSize) {
+      return false;
+    }
+
+    return path != null ? path.equals(dbPath.path) : dbPath.path == null;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = path != null ? path.hashCode() : 0;
+    result = 31 * result + (int) (targetSize ^ (targetSize >>> 32));
+    return result;
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java b/src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java
new file mode 100644
index 000000000..02fa3511f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java
@@ -0,0 +1,137 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Base class for slices which will receive direct
+ * ByteBuffer based access to the underlying data.
+ *
+ * ByteBuffer backed slices typically perform better with
+ * larger keys and values. When using smaller keys and
+ * values consider using {@link org.rocksdb.Slice}.
+ */
+public class DirectSlice extends AbstractSlice<ByteBuffer> {
+  public final static DirectSlice NONE = new DirectSlice();
+
+  /**
+   * Indicates whether we have to free the memory pointed to by the Slice
+   */
+  private final boolean internalBuffer;
+  private volatile boolean cleared = false;
+  private volatile long internalBufferOffset = 0;
+
+  /**
+   * Called from JNI to construct a new Java DirectSlice
+   * without an underlying C++ object set
+   * at creation time.
+   *
+   * Note: You should be aware that it is intentionally marked as
+   * package-private. This is so that developers cannot construct their own
+   * default DirectSlice objects (at present).
+   * As developers cannot construct their own DirectSlice objects through
+   * this, they are not creating underlying C++ DirectSlice objects, and
+   * so there is nothing to free (dispose) from Java.
+   */
+  DirectSlice() {
+    super();
+    this.internalBuffer = false;
+  }
+
+  /**
+   * Constructs a slice
+   * where the data is taken from
+   * a String.
+   *
+   * @param str The string
+   */
+  public DirectSlice(final String str) {
+    super(createNewSliceFromString(str));
+    this.internalBuffer = true;
+  }
+
+  /**
+   * Constructs a slice where the data is
+   * read from the provided
+   * ByteBuffer up to a certain length.
+   *
+   * @param data The buffer containing the data
+   * @param length The length of the data to use for the slice
+   */
+  public DirectSlice(final ByteBuffer data, final int length) {
+    super(createNewDirectSlice0(ensureDirect(data), length));
+    this.internalBuffer = false;
+  }
+
+  /**
+   * Constructs a slice where the data is
+   * read from the provided
+   * ByteBuffer.
+   *
+   * @param data The buffer containing the data
+   */
+  public DirectSlice(final ByteBuffer data) {
+    super(createNewDirectSlice1(ensureDirect(data)));
+    this.internalBuffer = false;
+  }
+
+  private static ByteBuffer ensureDirect(final ByteBuffer data) {
+    if(!data.isDirect()) {
+      throw new IllegalArgumentException("The ByteBuffer must be direct");
+    }
+    return data;
+  }
+
+  /**
+   * Retrieves the byte at a specific offset
+   * from the underlying data.
+   *
+   * @param offset The (zero-based) offset of the byte to retrieve
+   *
+   * @return the requested byte
+   */
+  public byte get(final int offset) {
+    return get0(getNativeHandle(), offset);
+  }
+
+  @Override
+  public void clear() {
+    clear0(getNativeHandle(), !cleared && internalBuffer, internalBufferOffset);
+    cleared = true;
+  }
+
+  @Override
+  public void removePrefix(final int n) {
+    removePrefix0(getNativeHandle(), n);
+    this.internalBufferOffset += n;
+  }
+
+  public void setLength(final int n) {
+    setLength0(getNativeHandle(), n);
+  }
+
+  @Override
+  protected void disposeInternal() {
+    final long nativeHandle = getNativeHandle();
+    if(!cleared && internalBuffer) {
+      disposeInternalBuf(nativeHandle, internalBufferOffset);
+    }
+    disposeInternal(nativeHandle);
+  }
+
+  private native static long createNewDirectSlice0(final ByteBuffer data,
+      final int length);
+  private native static long createNewDirectSlice1(final ByteBuffer data);
+  @Override protected final native ByteBuffer data0(long handle);
+  private native byte get0(long handle, int offset);
+  private native void clear0(long handle, boolean internalBuffer,
+      long internalBufferOffset);
+  private native void removePrefix0(long handle, int length);
+  private native void setLength0(long handle, int length);
+  private native void disposeInternalBuf(final long handle,
+      long internalBufferOffset);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java b/src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java
new file mode 100644
index 000000000..5ceeb54c8
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * EncodingType
+ *
+ * <p>The value will determine how to encode keys
+ * when writing to a new SST file.</p>
+ *
+ * <p>This value will be stored
+ * inside the SST file which will be used when reading from
+ * the file, which makes it possible for users to choose
+ * a different encoding type when reopening a DB. Files with
+ * different encoding types can co-exist in the same DB and
+ * can be read.</p>
+ */
+public enum EncodingType {
+  /**
+   * Always write full keys without any special encoding.
+   */
+  kPlain((byte) 0),
+  /**
+   * <p>Find opportunity to write the same prefix once for multiple rows.
+   * In some cases, when a key follows a previous key with the same prefix,
+   * instead of writing out the full key, it just writes out the size of the
+   * shared prefix, as well as other bytes, to save some bytes.</p>
+   *
+   * <p>When using this option, the user is required to use the same prefix
+   * extractor to make sure the same prefix will be extracted from the same
+   * key. The Name() value of the prefix extractor will be stored in the file.
+   * When reopening the file, the name of the options.prefix_extractor given
+   * will be bitwise compared to the prefix extractors stored in the file. An
+   * error will be returned if the two don't match.</p>
+   */
+  kPrefix((byte) 1);
+
+  /**
+   * Returns the byte value of the enumeration's value
+   *
+   * @return byte representation
+   */
+  public byte getValue() {
+    return value_;
+  }
+
+  private EncodingType(byte value) {
+    value_ = value;
+  }
+
+  private final byte value_;
+}
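+
+// A minimal sketch of choosing an encoding type, assuming the plain-table
+// format and a fixed-length prefix extractor (PlainTableConfig and
+// useFixedLengthPrefixExtractor are assumed to be available here):
+//
+//   try (final Options options = new Options()) {
+//     options.useFixedLengthPrefixExtractor(8);
+//     options.setTableFormatConfig(
+//         new PlainTableConfig().setEncodingType(EncodingType.kPrefix));
+//   }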

Returns the default environment suitable for the current operating + * system.

+ * + *

The result of {@code getDefault()} is a singleton whose ownership + * belongs to rocksdb c++. As a result, the returned RocksEnv will not + * have the ownership of its c++ resource, and calling its dispose()/close() + * will be no-op.

+ * + * @return the default {@link org.rocksdb.RocksEnv} instance. + */ + public static Env getDefault() { + return DEFAULT_ENV; + } + + /** + *

Sets the number of background worker threads of the low priority + * pool for this environment.

+ *

Default number: 1

+ * + * @param number the number of threads + * + * @return current {@link RocksEnv} instance. + */ + public Env setBackgroundThreads(final int number) { + return setBackgroundThreads(number, Priority.LOW); + } + + /** + *

Gets the number of background worker threads of the pool + * for this environment.

+ * + * @param priority the priority id of a specified thread pool. + * + * @return the number of threads. + */ + public int getBackgroundThreads(final Priority priority) { + return getBackgroundThreads(nativeHandle_, priority.getValue()); + } + + /** + * <p>Sets the number of background worker threads of the specified thread + * pool for this environment.</p> + * + * @param number the number of threads + * @param priority the priority id of a specified thread pool. + * + * <p>Default number: 1</p>
+ * @return current {@link RocksEnv} instance. + */ + public Env setBackgroundThreads(final int number, final Priority priority) { + setBackgroundThreads(nativeHandle_, number, priority.getValue()); + return this; + } + + /** + * <p>Returns the length of the queue associated with the specified + * thread pool.</p> + * + * @param priority the priority id of a specified thread pool. + * + * @return the thread pool queue length. + */ + public int getThreadPoolQueueLen(final Priority priority) { + return getThreadPoolQueueLen(nativeHandle_, priority.getValue()); + } + + /** + * Enlarge number of background worker threads of a specific thread pool + * for this environment if it is smaller than specified. 'LOW' is the default + * pool. + * + * @param number the number of threads. + * @param priority the priority id of a specified thread pool. + * + * @return current {@link RocksEnv} instance. + */ + public Env incBackgroundThreadsIfNeeded(final int number, + final Priority priority) { + incBackgroundThreadsIfNeeded(nativeHandle_, number, priority.getValue()); + return this; + } + + /** + * Lower IO priority for threads from the specified pool. + * + * @param priority the priority id of a specified thread pool. + * + * @return current {@link RocksEnv} instance. + */ + public Env lowerThreadPoolIOPriority(final Priority priority) { + lowerThreadPoolIOPriority(nativeHandle_, priority.getValue()); + return this; + } + + /** + * Lower CPU priority for threads from the specified pool. + * + * @param priority the priority id of a specified thread pool. + * + * @return current {@link RocksEnv} instance. + */ + public Env lowerThreadPoolCPUPriority(final Priority priority) { + lowerThreadPoolCPUPriority(nativeHandle_, priority.getValue()); + return this; + } + + /** + * Returns the status of all threads that belong to the current Env. + * + * @return the status of all threads that belong to this env. + * + * @throws RocksDBException if the thread list cannot be acquired. + */ + public List<ThreadStatus> getThreadList() throws RocksDBException { + return Arrays.asList(getThreadList(nativeHandle_)); + } + + Env(final long nativeHandle) { + super(nativeHandle); + } + + private static native long getDefaultEnvInternal(); + private native void setBackgroundThreads( + final long handle, final int number, final byte priority); + private native int getBackgroundThreads(final long handle, + final byte priority); + private native int getThreadPoolQueueLen(final long handle, + final byte priority); + private native void incBackgroundThreadsIfNeeded(final long handle, + final int number, final byte priority); + private native void lowerThreadPoolIOPriority(final long handle, + final byte priority); + private native void lowerThreadPoolCPUPriority(final long handle, + final byte priority); + private native ThreadStatus[] getThreadList(final long handle) + throws RocksDBException; +}
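(Illustrative aside, not part of the patch: a sketch of driving these Env pool methods from application code. The pool sizes are arbitrary; Priority.LOW serves compactions and Priority.HIGH serves flushes.)

    // Tune the shared default Env before opening any databases that use it.
    final Env env = Env.getDefault();
    env.setBackgroundThreads(4, Priority.LOW)    // compaction pool
        .setBackgroundThreads(2, Priority.HIGH); // flush pool
    System.out.println("low-priority queue length: "
        + env.getThreadPoolQueueLen(Priority.LOW));
    // An Options instance can then be pointed at this Env via Options#setEnv.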
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java new file mode 100644 index 000000000..6baddb310 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/EnvOptions.java @@ -0,0 +1,366 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Options while opening a file to read/write + */ +public class EnvOptions extends RocksObject { + static { + RocksDB.loadLibrary(); + } + + /** + * Construct with default Options + */ + public EnvOptions() { + super(newEnvOptions()); + } + + /** + * Construct from {@link DBOptions}. + * + * @param dbOptions the database options. + */ + public EnvOptions(final DBOptions dbOptions) { + super(newEnvOptions(dbOptions.nativeHandle_)); + } + + /** + * Enable/Disable memory mapped reads. + * + * Default: false + * + * @param useMmapReads true to enable memory mapped reads, false to disable. + * + * @return the reference to these options. + */ + public EnvOptions setUseMmapReads(final boolean useMmapReads) { + setUseMmapReads(nativeHandle_, useMmapReads); + return this; + } + + /** + * Determine if memory mapped reads are in-use. + * + * @return true if memory mapped reads are in-use, false otherwise. + */ + public boolean useMmapReads() { + assert(isOwningHandle()); + return useMmapReads(nativeHandle_); + } + + /** + * Enable/Disable memory mapped writes. + * + * Default: true + * + * @param useMmapWrites true to enable memory mapped writes, false to disable. + * + * @return the reference to these options. + */ + public EnvOptions setUseMmapWrites(final boolean useMmapWrites) { + setUseMmapWrites(nativeHandle_, useMmapWrites); + return this; + } + + /** + * Determine if memory mapped writes are in-use. + * + * @return true if memory mapped writes are in-use, false otherwise. + */ + public boolean useMmapWrites() { + assert(isOwningHandle()); + return useMmapWrites(nativeHandle_); + } + + /** + * Enable/Disable direct reads, i.e. {@code O_DIRECT}. + * + * Default: false + * + * @param useDirectReads true to enable direct reads, false to disable. + * + * @return the reference to these options. + */ + public EnvOptions setUseDirectReads(final boolean useDirectReads) { + setUseDirectReads(nativeHandle_, useDirectReads); + return this; + } + + /** + * Determine if direct reads are in-use. + * + * @return true if direct reads are in-use, false otherwise. + */ + public boolean useDirectReads() { + assert(isOwningHandle()); + return useDirectReads(nativeHandle_); + } + + /** + * Enable/Disable direct writes, i.e. {@code O_DIRECT}. + * + * Default: false + * + * @param useDirectWrites true to enable direct writes, false to disable. + * + * @return the reference to these options. + */ + public EnvOptions setUseDirectWrites(final boolean useDirectWrites) { + setUseDirectWrites(nativeHandle_, useDirectWrites); + return this; + } + + /** + * Determine if direct writes are in-use. + * + * @return true if direct writes are in-use, false otherwise. + */ + public boolean useDirectWrites() { + assert(isOwningHandle()); + return useDirectWrites(nativeHandle_); + } + + /** + * Enable/Disable fallocate calls. + * + * Default: true + * + * If false, {@code fallocate()} calls are bypassed. + * + * @param allowFallocate true to enable fallocate calls, false to disable. + * + * @return the reference to these options. + */ + public EnvOptions setAllowFallocate(final boolean allowFallocate) { + setAllowFallocate(nativeHandle_, allowFallocate); + return this; + } + + /** + * Determine if fallocate calls are used. + * + * @return true if fallocate calls are used, false otherwise. + */ + public boolean allowFallocate() { + assert(isOwningHandle()); + return allowFallocate(nativeHandle_); + } + + /** + * Enable/Disable the {@code FD_CLOEXEC} bit when opening file descriptors. + * + * Default: true + * + * @param setFdCloexec true to enable the {@code FD_CLOEXEC} bit, + * false to disable. + * + * @return the reference to these options. + */ + public EnvOptions setSetFdCloexec(final boolean setFdCloexec) { + setSetFdCloexec(nativeHandle_, setFdCloexec); + return this; + } + + /** + * Determine if the {@code FD_CLOEXEC} bit is set when opening file + * descriptors. + * + * @return true if the {@code FD_CLOEXEC} bit is enabled, false otherwise. + */ + public boolean setFdCloexec() { + assert(isOwningHandle()); + return setFdCloexec(nativeHandle_); + } + + /** + * Allows OS to incrementally sync files to disk while they are being + * written, in the background. Issue one request for every + * {@code bytesPerSync} written. + * + * Default: 0 + * + * @param bytesPerSync 0 to disable, otherwise the number of bytes. + * + * @return the reference to these options. + */ + public EnvOptions setBytesPerSync(final long bytesPerSync) { + setBytesPerSync(nativeHandle_, bytesPerSync); + return this; + } + + /** + * Get the number of incremental bytes per sync written in the background. + * + * @return 0 if disabled, otherwise the number of bytes. + */ + public long bytesPerSync() { + assert(isOwningHandle()); + return bytesPerSync(nativeHandle_); + } + + /** + * If true, we will preallocate the file with {@code FALLOC_FL_KEEP_SIZE} + * flag, which means that file size won't change as part of preallocation. + * If false, preallocation will also change the file size. This option will + * improve the performance in workloads where you sync the data on every + * write. By default, we set it to true for MANIFEST writes and false for + * WAL writes. + * + * @param fallocateWithKeepSize true to preallocate, false otherwise. + * + * @return the reference to these options. + */ + public EnvOptions setFallocateWithKeepSize( + final boolean fallocateWithKeepSize) { + setFallocateWithKeepSize(nativeHandle_, fallocateWithKeepSize); + return this; + } + + /** + * Determine if file is preallocated. + * + * @return true if the file is preallocated, false otherwise. + */ + public boolean fallocateWithKeepSize() { + assert(isOwningHandle()); + return fallocateWithKeepSize(nativeHandle_); + } + + /** + * See {@link DBOptions#setCompactionReadaheadSize(long)}. + * + * @param compactionReadaheadSize the compaction read-ahead size. + * + * @return the reference to these options. + */ + public EnvOptions setCompactionReadaheadSize( + final long compactionReadaheadSize) { + setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize); + return this; + } + + /** + * See {@link DBOptions#compactionReadaheadSize()}. + * + * @return the compaction read-ahead size. + */ + public long compactionReadaheadSize() { + assert(isOwningHandle()); + return compactionReadaheadSize(nativeHandle_); + } + + /** + * See {@link DBOptions#setRandomAccessMaxBufferSize(long)}. + * + * @param randomAccessMaxBufferSize the max buffer size for random access. + * + * @return the reference to these options. + */ + public EnvOptions setRandomAccessMaxBufferSize( + final long randomAccessMaxBufferSize) { + setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize); + return this; + } + + /** + * See {@link DBOptions#randomAccessMaxBufferSize()}. + * + * @return the max buffer size for random access. + */ + public long randomAccessMaxBufferSize() { + assert(isOwningHandle()); + return randomAccessMaxBufferSize(nativeHandle_); + } + + /** + * See {@link DBOptions#setWritableFileMaxBufferSize(long)}. + * + * @param writableFileMaxBufferSize the max buffer size. + * + * @return the reference to these options.
+ */ + public EnvOptions setWritableFileMaxBufferSize( + final long writableFileMaxBufferSize) { + setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize); + return this; + } + + /** + * See {@link DBOptions#writableFileMaxBufferSize()}. + * + * @return the max buffer size. + */ + public long writableFileMaxBufferSize() { + assert(isOwningHandle()); + return writableFileMaxBufferSize(nativeHandle_); + } + + /** + * Set the write rate limiter for flush and compaction. + * + * @param rateLimiter the rate limiter. + * + * @return the reference to these options. + */ + public EnvOptions setRateLimiter(final RateLimiter rateLimiter) { + this.rateLimiter = rateLimiter; + setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_); + return this; + } + + /** + * Get the write rate limiter for flush and compaction. + * + * @return the rate limiter. + */ + public RateLimiter rateLimiter() { + assert(isOwningHandle()); + return rateLimiter; + } + + private native static long newEnvOptions(); + private native static long newEnvOptions(final long dboptions_handle); + @Override protected final native void disposeInternal(final long handle); + + private native void setUseMmapReads(final long handle, + final boolean useMmapReads); + private native boolean useMmapReads(final long handle); + private native void setUseMmapWrites(final long handle, + final boolean useMmapWrites); + private native boolean useMmapWrites(final long handle); + private native void setUseDirectReads(final long handle, + final boolean useDirectReads); + private native boolean useDirectReads(final long handle); + private native void setUseDirectWrites(final long handle, + final boolean useDirectWrites); + private native boolean useDirectWrites(final long handle); + private native void setAllowFallocate(final long handle, + final boolean allowFallocate); + private native boolean allowFallocate(final long handle); + private native void setSetFdCloexec(final long handle, + final boolean setFdCloexec); + private native boolean setFdCloexec(final long handle); + private native void setBytesPerSync(final long handle, + final long bytesPerSync); + private native long bytesPerSync(final long handle); + private native void setFallocateWithKeepSize( + final long handle, final boolean fallocateWithKeepSize); + private native boolean fallocateWithKeepSize(final long handle); + private native void setCompactionReadaheadSize( + final long handle, final long compactionReadaheadSize); + private native long compactionReadaheadSize(final long handle); + private native void setRandomAccessMaxBufferSize( + final long handle, final long randomAccessMaxBufferSize); + private native long randomAccessMaxBufferSize(final long handle); + private native void setWritableFileMaxBufferSize( + final long handle, final long writableFileMaxBufferSize); + private native long writableFileMaxBufferSize(final long handle); + private native void setRateLimiter(final long handle, + final long rateLimiterHandle); + private RateLimiter rateLimiter; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/EventListener.java b/src/rocksdb/java/src/main/java/org/rocksdb/EventListener.java new file mode 100644 index 000000000..a12ab92ba --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/EventListener.java @@ -0,0 +1,335 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * The EventListener class contains a set of callback functions that will + * be called when a specific RocksDB event happens, such as a flush. It can + * be used as a building block for developing custom features such as a + * stats collector or an external compaction algorithm. + * + * Note that callback functions should not run for an extended period of + * time before the function returns, otherwise RocksDB may be blocked. + * For example, it is not suggested to do + * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, + * CompactionJobInfo)} (as it may run for a long while) or issue many of + * {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} + * (as Put may be blocked in certain cases) in the same thread in the + * EventListener callback. + * + * However, doing + * {@link RocksDB#compactFiles(CompactionOptions, ColumnFamilyHandle, List, int, int, + * CompactionJobInfo)} and {@link RocksDB#put(ColumnFamilyHandle, WriteOptions, byte[], byte[])} in + * another thread is considered safe. + * + * [Threading] All EventListener callbacks will be called by the + * actual thread that is involved in that specific event. For example, it + * is the RocksDB background flush thread that does the actual flush to + * call {@link #onFlushCompleted(RocksDB, FlushJobInfo)}. + * + * [Locking] All EventListener callbacks are designed to be called without + * the current thread holding any DB mutex. This is to prevent potential + * deadlocks and performance issues when using EventListener callbacks + * in a complex way. + */ +public interface EventListener { + /** + * A callback function to RocksDB which will be called before a + * RocksDB starts to flush memtables. + * + * Note that this function must be implemented in a way such that + * it should not run for an extended period of time before the function + * returns. Otherwise, RocksDB may be blocked. + * + * @param db the database + * @param flushJobInfo the flush job info, contains data copied from + * respective native structure. + */ + void onFlushBegin(final RocksDB db, final FlushJobInfo flushJobInfo); + + /** + * A callback function to RocksDB which will be called whenever a + * registered RocksDB flushes a file. + * + * Note that this function must be implemented in a way such that + * it should not run for an extended period of time before the function + * returns. Otherwise, RocksDB may be blocked. + * + * @param db the database + * @param flushJobInfo the flush job info, contains data copied from + * respective native structure. + */ + void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo); + + /** + * A callback function for RocksDB which will be called whenever + * a SST file is deleted. Different from + * {@link #onCompactionCompleted(RocksDB, CompactionJobInfo)} and + * {@link #onFlushCompleted(RocksDB, FlushJobInfo)}, + * this callback is designed for external logging + * services and thus only provides string parameters instead + * of a pointer to DB. Applications that build their logic based + * on file creations and deletions are advised to implement + * {@link #onFlushCompleted(RocksDB, FlushJobInfo)} and + * {@link #onCompactionCompleted(RocksDB, CompactionJobInfo)}.
+ * + * Note that if applications would like to use the passed reference + * outside this function call, they should make copies from the + * returned value. + * + * @param tableFileDeletionInfo the table file deletion info, + * contains data copied from respective native structure. + */ + void onTableFileDeleted(final TableFileDeletionInfo tableFileDeletionInfo); + + /** + * A callback function to RocksDB which will be called before a + * RocksDB starts to compact. The default implementation is + * no-op. + * + * Note that this function must be implemented in a way such that + * it should not run for an extended period of time before the function + * returns. Otherwise, RocksDB may be blocked. + * + * @param db a pointer to the rocksdb instance which just compacted + * a file. + * @param compactionJobInfo a reference to a native CompactionJobInfo struct, + * which is released after this function is returned, and must be copied + * if it is needed outside of this function. + */ + void onCompactionBegin(final RocksDB db, final CompactionJobInfo compactionJobInfo); + + /** + * A callback function for RocksDB which will be called whenever + * a registered RocksDB compacts a file. The default implementation + * is a no-op. + * + * Note that this function must be implemented in a way such that + * it should not run for an extended period of time before the function + * returns. Otherwise, RocksDB may be blocked. + * + * @param db a pointer to the rocksdb instance which just compacted + * a file. + * @param compactionJobInfo a reference to a native CompactionJobInfo struct, + * which is released after this function is returned, and must be copied + * if it is needed outside of this function. + */ + void onCompactionCompleted(final RocksDB db, final CompactionJobInfo compactionJobInfo); + + /** + * A callback function for RocksDB which will be called whenever + * a SST file is created. Different from OnCompactionCompleted and + * OnFlushCompleted, this callback is designed for external logging + * services and thus only provides string parameters instead + * of a pointer to DB. Applications that build their logic based + * on file creations and deletions are advised to implement + * OnFlushCompleted and OnCompactionCompleted. + * + * Historically it will only be called if the file is successfully created. + * Now it will also be called on failure. Users can check info.status + * to see if it succeeded or not. + * + * Note that if applications would like to use the passed reference + * outside this function call, they should make copies from the + * returned value. + * + * @param tableFileCreationInfo the table file creation info, + * contains data copied from respective native structure. + */ + void onTableFileCreated(final TableFileCreationInfo tableFileCreationInfo); + + /** + * A callback function for RocksDB which will be called before + * a SST file is being created. It will be followed by OnTableFileCreated after + * the creation finishes. + * + * Note that if applications would like to use the passed reference + * outside this function call, they should make copies from the + * returned value. + * + * @param tableFileCreationBriefInfo the table file creation brief info, + * contains data copied from respective native structure. + */ + void onTableFileCreationStarted(final TableFileCreationBriefInfo tableFileCreationBriefInfo); + + /** + * A callback function for RocksDB which will be called before + * a memtable is made immutable.
+ * + * Note that this function must be implemented in a way such that + * it should not run for an extended period of time before the function + * returns. Otherwise, RocksDB may be blocked. + * + * Note that if applications would like to use the passed reference + * outside this function call, they should make copies from the + * returned value. + * + * @param memTableInfo the mem table info, contains data + * copied from respective native structure. + */ + void onMemTableSealed(final MemTableInfo memTableInfo); + + /** + * A callback function for RocksDB which will be called before + * a column family handle is deleted. + * + * Note that this function must be implemented in a way such that + * it should not run for an extended period of time before the function + * returns. Otherwise, RocksDB may be blocked. + * + * @param columnFamilyHandle is a pointer to the column family handle to be + * deleted which will become a dangling pointer after the deletion. + */ + void onColumnFamilyHandleDeletionStarted(final ColumnFamilyHandle columnFamilyHandle); + + /** + * A callback function for RocksDB which will be called after an external + * file is ingested using IngestExternalFile. + * + * Note that this function will run on the same thread as + * IngestExternalFile(), if this function is blocked, IngestExternalFile() + * will be blocked from finishing. + * + * @param db the database + * @param externalFileIngestionInfo the external file ingestion info, + * contains data copied from respective native structure. + */ + void onExternalFileIngested( + final RocksDB db, final ExternalFileIngestionInfo externalFileIngestionInfo); + + /** + * A callback function for RocksDB which will be called before setting the + * background error status to a non-OK value. The new background error status + * is provided in `bg_error` and can be modified by the callback. E.g., a + * callback can suppress errors by resetting it to Status::OK(), thus + * preventing the database from entering read-only mode. We do not provide any + * guarantee when failed flushes/compactions will be rescheduled if the user + * suppresses an error. + * + * Note that this function can run on the same threads as flush, compaction, + * and user writes. So, it is extremely important not to perform heavy + * computations or blocking calls in this function. + * + * @param backgroundErrorReason background error reason code + * @param backgroundError background error codes + */ + void onBackgroundError( + final BackgroundErrorReason backgroundErrorReason, final Status backgroundError); + + /** + * A callback function for RocksDB which will be called whenever a change + * of superversion triggers a change of the stall conditions. + * + * Note that this function must be implemented in a way such that + * it should not run for an extended period of time before the function + * returns. Otherwise, RocksDB may be blocked. + * + * @param writeStallInfo write stall info, + * contains data copied from respective native structure. + */ + void onStallConditionsChanged(final WriteStallInfo writeStallInfo); + + /** + * A callback function for RocksDB which will be called whenever a file read + * operation finishes. + * + * @param fileOperationInfo file operation info, + * contains data copied from respective native structure. + */ + void onFileReadFinish(final FileOperationInfo fileOperationInfo); + + /** + * A callback function for RocksDB which will be called whenever a file write + * operation finishes.
+ * + * @param fileOperationInfo file operation info, + * contains data copied from respective native structure. + */ + void onFileWriteFinish(final FileOperationInfo fileOperationInfo); + + /** + * A callback function for RocksDB which will be called whenever a file flush + * operation finishes. + * + * @param fileOperationInfo file operation info, + * contains data copied from respective native structure. + */ + void onFileFlushFinish(final FileOperationInfo fileOperationInfo); + + /** + * A callback function for RocksDB which will be called whenever a file sync + * operation finishes. + * + * @param fileOperationInfo file operation info, + * contains data copied from respective native structure. + */ + void onFileSyncFinish(final FileOperationInfo fileOperationInfo); + + /** + * A callback function for RocksDB which will be called whenever a file + * rangeSync operation finishes. + * + * @param fileOperationInfo file operation info, + * contains data copied from respective native structure. + */ + void onFileRangeSyncFinish(final FileOperationInfo fileOperationInfo); + + /** + * A callback function for RocksDB which will be called whenever a file + * truncate operation finishes. + * + * @param fileOperationInfo file operation info, + * contains data copied from respective native structure. + */ + void onFileTruncateFinish(final FileOperationInfo fileOperationInfo); + + /** + * A callback function for RocksDB which will be called whenever a file close + * operation finishes. + * + * @param fileOperationInfo file operation info, + * contains data copied from respective native structure. + */ + void onFileCloseFinish(final FileOperationInfo fileOperationInfo); + + /** + * If true, the {@link #onFileReadFinish(FileOperationInfo)} + * and {@link #onFileWriteFinish(FileOperationInfo)} will be called. If + * false, then they won't be called. + * + * Default: false + * + * @return whether to callback when file read/write is finished + */ + boolean shouldBeNotifiedOnFileIO(); + + /** + * A callback function for RocksDB which will be called just before + * starting the automatic recovery process for recoverable background + * errors, such as NoSpace(). The callback can suppress the automatic + * recovery by returning false. The database will then + * have to be transitioned out of read-only mode by calling + * RocksDB#resume(). + * + * @param backgroundErrorReason background error reason code + * @param backgroundError background error codes + * @return return {@code false} if the automatic recovery should be suppressed + */ + boolean onErrorRecoveryBegin( + final BackgroundErrorReason backgroundErrorReason, final Status backgroundError); + + /** + * A callback function for RocksDB which will be called once the database + * is recovered from read-only mode after an error. When this is called, it + * means normal writes to the database can be issued and the user can + * initiate any further recovery actions needed. + * + * @param oldBackgroundError old background error codes + */ + void onErrorRecoveryCompleted(final Status oldBackgroundError); +}
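(Illustrative aside, not part of the patch: a minimal listener sketch. It assumes the AbstractEventListener adapter added elsewhere in this patch and registration through Options#setListeners; the printed fields are the FlushJobInfo accessors defined below.)

    import java.util.Collections;
    import org.rocksdb.*;

    class FlushLogger extends AbstractEventListener {
      @Override
      public void onFlushCompleted(final RocksDB db, final FlushJobInfo info) {
        // Runs on the background flush thread -- return quickly.
        System.out.println("flushed " + info.getFilePath()
            + " (reason: " + info.getFlushReason() + ")");
      }
    }

    // Registration before opening the database:
    // options.setListeners(Collections.singletonList(new FlushLogger()));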
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java b/src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java new file mode 100644 index 000000000..64b404d6f --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Experimental.java @@ -0,0 +1,23 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Marks a feature as experimental, meaning that it is likely + * to change or even be removed/re-engineered in the future. + */ +@Documented +@Retention(RetentionPolicy.SOURCE) +@Target({ElementType.TYPE, ElementType.METHOD}) +public @interface Experimental { + String value(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java new file mode 100644 index 000000000..6b14a8024 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ExternalFileIngestionInfo.java @@ -0,0 +1,103 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Objects; + +public class ExternalFileIngestionInfo { + private final String columnFamilyName; + private final String externalFilePath; + private final String internalFilePath; + private final long globalSeqno; + private final TableProperties tableProperties; + + /** + * Access is package private as this will only be constructed from + * C++ via JNI and for testing. + */ + ExternalFileIngestionInfo(final String columnFamilyName, final String externalFilePath, + final String internalFilePath, final long globalSeqno, + final TableProperties tableProperties) { + this.columnFamilyName = columnFamilyName; + this.externalFilePath = externalFilePath; + this.internalFilePath = internalFilePath; + this.globalSeqno = globalSeqno; + this.tableProperties = tableProperties; + } + + /** + * Get the name of the column family. + * + * @return the name of the column family. + */ + public String getColumnFamilyName() { + return columnFamilyName; + } + + /** + * Get the path of the file outside the DB. + * + * @return the path of the file outside the DB. + */ + public String getExternalFilePath() { + return externalFilePath; + } + + /** + * Get the path of the file inside the DB. + * + * @return the path of the file inside the DB. + */ + public String getInternalFilePath() { + return internalFilePath; + } + + /** + * Get the global sequence number assigned to keys in this file. + * + * @return the global sequence number. + */ + public long getGlobalSeqno() { + return globalSeqno; + } + + /** + * Get the table properties of the table being ingested. + * + * @return the table properties.
+ */ + public TableProperties getTableProperties() { + return tableProperties; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + ExternalFileIngestionInfo that = (ExternalFileIngestionInfo) o; + return globalSeqno == that.globalSeqno + && Objects.equals(columnFamilyName, that.columnFamilyName) + && Objects.equals(externalFilePath, that.externalFilePath) + && Objects.equals(internalFilePath, that.internalFilePath) + && Objects.equals(tableProperties, that.tableProperties); + } + + @Override + public int hashCode() { + return Objects.hash( + columnFamilyName, externalFilePath, internalFilePath, globalSeqno, tableProperties); + } + + @Override + public String toString() { + return "ExternalFileIngestionInfo{" + + "columnFamilyName='" + columnFamilyName + '\'' + ", externalFilePath='" + externalFilePath + + '\'' + ", internalFilePath='" + internalFilePath + '\'' + ", globalSeqno=" + globalSeqno + + ", tableProperties=" + tableProperties + '}'; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/FileOperationInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/FileOperationInfo.java new file mode 100644 index 000000000..aa5743ed3 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/FileOperationInfo.java @@ -0,0 +1,112 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Objects; + +/** + * Java representation of FileOperationInfo struct from include/rocksdb/listener.h + */ +public class FileOperationInfo { + private final String path; + private final long offset; + private final long length; + private final long startTimestamp; + private final long duration; + private final Status status; + + /** + * Access is private as this will only be constructed from + * C++ via JNI. + */ + FileOperationInfo(final String path, final long offset, final long length, + final long startTimestamp, final long duration, final Status status) { + this.path = path; + this.offset = offset; + this.length = length; + this.startTimestamp = startTimestamp; + this.duration = duration; + this.status = status; + } + + /** + * Get the file path. + * + * @return the file path. + */ + public String getPath() { + return path; + } + + /** + * Get the offset. + * + * @return the offset. + */ + public long getOffset() { + return offset; + } + + /** + * Get the length. + * + * @return the length. + */ + public long getLength() { + return length; + } + + /** + * Get the start timestamp (in nanoseconds). + * + * @return the start timestamp. + */ + public long getStartTimestamp() { + return startTimestamp; + } + + /** + * Get the operation duration (in nanoseconds). + * + * @return the operation duration. + */ + public long getDuration() { + return duration; + } + + /** + * Get the status. + * + * @return the status. 
+ */ + public Status getStatus() { + return status; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + FileOperationInfo that = (FileOperationInfo) o; + return offset == that.offset && length == that.length && startTimestamp == that.startTimestamp + && duration == that.duration && Objects.equals(path, that.path) + && Objects.equals(status, that.status); + } + + @Override + public int hashCode() { + return Objects.hash(path, offset, length, startTimestamp, duration, status); + } + + @Override + public String toString() { + return "FileOperationInfo{" + + "path='" + path + '\'' + ", offset=" + offset + ", length=" + length + ", startTimestamp=" + + startTimestamp + ", duration=" + duration + ", status=" + status + '}'; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Filter.java b/src/rocksdb/java/src/main/java/org/rocksdb/Filter.java new file mode 100644 index 000000000..7f490cf59 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Filter.java @@ -0,0 +1,36 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Filters are stored in rocksdb and are consulted automatically + * by rocksdb to decide whether or not to read some + * information from disk. In many cases, a filter can cut down the + * number of disk seeks from a handful to a single disk seek per + * DB::Get() call. + */ +//TODO(AR) should be renamed FilterPolicy +public abstract class Filter extends RocksObject { + + protected Filter(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Deletes underlying C++ filter pointer. + * + * Note that this function should be called only after all + * RocksDB instances referencing the filter are closed. + * Otherwise undefined behavior will occur. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + @Override + protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/FlushJobInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/FlushJobInfo.java new file mode 100644 index 000000000..ca9aa0523 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/FlushJobInfo.java @@ -0,0 +1,186 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Objects; + +public class FlushJobInfo { + private final long columnFamilyId; + private final String columnFamilyName; + private final String filePath; + private final long threadId; + private final int jobId; + private final boolean triggeredWritesSlowdown; + private final boolean triggeredWritesStop; + private final long smallestSeqno; + private final long largestSeqno; + private final TableProperties tableProperties; + private final FlushReason flushReason; + + /** + * Access is package private as this will only be constructed from + * C++ via JNI and for testing.
+ */ + FlushJobInfo(final long columnFamilyId, final String columnFamilyName, final String filePath, + final long threadId, final int jobId, final boolean triggeredWritesSlowdown, + final boolean triggeredWritesStop, final long smallestSeqno, final long largestSeqno, + final TableProperties tableProperties, final byte flushReasonValue) { + this.columnFamilyId = columnFamilyId; + this.columnFamilyName = columnFamilyName; + this.filePath = filePath; + this.threadId = threadId; + this.jobId = jobId; + this.triggeredWritesSlowdown = triggeredWritesSlowdown; + this.triggeredWritesStop = triggeredWritesStop; + this.smallestSeqno = smallestSeqno; + this.largestSeqno = largestSeqno; + this.tableProperties = tableProperties; + this.flushReason = FlushReason.fromValue(flushReasonValue); + } + + /** + * Get the id of the column family. + * + * @return the id of the column family + */ + public long getColumnFamilyId() { + return columnFamilyId; + } + + /** + * Get the name of the column family. + * + * @return the name of the column family + */ + public String getColumnFamilyName() { + return columnFamilyName; + } + + /** + * Get the path to the newly created file. + * + * @return the path to the newly created file + */ + public String getFilePath() { + return filePath; + } + + /** + * Get the id of the thread that completed this flush job. + * + * @return the id of the thread that completed this flush job + */ + public long getThreadId() { + return threadId; + } + + /** + * Get the job id, which is unique in the same thread. + * + * @return the job id + */ + public int getJobId() { + return jobId; + } + + /** + * Determine if rocksdb is currently slowing-down all writes to prevent + * creating too many Level 0 files as compaction seems not able to + * catch up the write request speed. + * + * This indicates that there are too many files in Level 0. + * + * @return true if rocksdb is currently slowing-down all writes, + * false otherwise + */ + public boolean isTriggeredWritesSlowdown() { + return triggeredWritesSlowdown; + } + + /** + * Determine if rocksdb is currently blocking any writes to prevent + * creating more L0 files. + * + * This indicates that there are too many files in level 0. + * Compactions should try to compact L0 files down to lower levels as soon + * as possible. + * + * @return true if rocksdb is currently blocking any writes, false otherwise + */ + public boolean isTriggeredWritesStop() { + return triggeredWritesStop; + } + + /** + * Get the smallest sequence number in the newly created file. + * + * @return the smallest sequence number + */ + public long getSmallestSeqno() { + return smallestSeqno; + } + + /** + * Get the largest sequence number in the newly created file. + * + * @return the largest sequence number + */ + public long getLargestSeqno() { + return largestSeqno; + } + + /** + * Get the Table properties of the table being flushed. + * + * @return the Table properties of the table being flushed + */ + public TableProperties getTableProperties() { + return tableProperties; + } + + /** + * Get the reason for initiating the flush. + * + * @return the reason for initiating the flush. 
+ */ + public FlushReason getFlushReason() { + return flushReason; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + FlushJobInfo that = (FlushJobInfo) o; + return columnFamilyId == that.columnFamilyId && threadId == that.threadId && jobId == that.jobId + && triggeredWritesSlowdown == that.triggeredWritesSlowdown + && triggeredWritesStop == that.triggeredWritesStop && smallestSeqno == that.smallestSeqno + && largestSeqno == that.largestSeqno + && Objects.equals(columnFamilyName, that.columnFamilyName) + && Objects.equals(filePath, that.filePath) + && Objects.equals(tableProperties, that.tableProperties) && flushReason == that.flushReason; + } + + @Override + public int hashCode() { + return Objects.hash(columnFamilyId, columnFamilyName, filePath, threadId, jobId, + triggeredWritesSlowdown, triggeredWritesStop, smallestSeqno, largestSeqno, tableProperties, + flushReason); + } + + @Override + public String toString() { + return "FlushJobInfo{" + + "columnFamilyId=" + columnFamilyId + ", columnFamilyName='" + columnFamilyName + '\'' + + ", filePath='" + filePath + '\'' + ", threadId=" + threadId + ", jobId=" + jobId + + ", triggeredWritesSlowdown=" + triggeredWritesSlowdown + + ", triggeredWritesStop=" + triggeredWritesStop + ", smallestSeqno=" + smallestSeqno + + ", largestSeqno=" + largestSeqno + ", tableProperties=" + tableProperties + + ", flushReason=" + flushReason + '}'; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java new file mode 100644 index 000000000..760b515fd --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java @@ -0,0 +1,90 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * FlushOptions to be passed to flush operations of + * {@link org.rocksdb.RocksDB}. + */ +public class FlushOptions extends RocksObject { + static { + RocksDB.loadLibrary(); + } + + /** + * Construct a new instance of FlushOptions. + */ + public FlushOptions(){ + super(newFlushOptions()); + } + + /** + * Set if the flush operation shall block until it terminates. + * + * @param waitForFlush boolean value indicating if the flush + * operation waits for termination of the flush process. + * + * @return instance of current FlushOptions. + */ + public FlushOptions setWaitForFlush(final boolean waitForFlush) { + assert(isOwningHandle()); + setWaitForFlush(nativeHandle_, waitForFlush); + return this; + } + + /** + * Wait for the flush to finish. + * + * @return boolean value indicating if the flush operation + * waits for termination of the flush process. + */ + public boolean waitForFlush() { + assert(isOwningHandle()); + return waitForFlush(nativeHandle_); + } + + /** + * Set to true so that the flush proceeds immediately even if it means + * writes will stall for the duration of the flush. + * + * Set to false so that the operation will wait until it's possible to do + * the flush without causing stall or until required flush is performed by + * someone else (foreground call or background thread). + * + * Default: false + * + * @param allowWriteStall true to allow writes to stall for flush, false + * otherwise.
+ * + * @return instance of current FlushOptions. + */ + public FlushOptions setAllowWriteStall(final boolean allowWriteStall) { + assert(isOwningHandle()); + setAllowWriteStall(nativeHandle_, allowWriteStall); + return this; + } + + /** + * Returns true if writes are allowed to stall for flushes to complete, false + * otherwise. + * + * @return true if writes are allowed to stall for flushes + */ + public boolean allowWriteStall() { + assert(isOwningHandle()); + return allowWriteStall(nativeHandle_); + } + + private native static long newFlushOptions(); + @Override protected final native void disposeInternal(final long handle); + + private native void setWaitForFlush(final long handle, + final boolean wait); + private native boolean waitForFlush(final long handle); + private native void setAllowWriteStall(final long handle, + final boolean allowWriteStall); + private native boolean allowWriteStall(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/FlushReason.java b/src/rocksdb/java/src/main/java/org/rocksdb/FlushReason.java new file mode 100644 index 000000000..9d486cda1 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/FlushReason.java @@ -0,0 +1,53 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum FlushReason { + OTHERS((byte) 0x00), + GET_LIVE_FILES((byte) 0x01), + SHUTDOWN((byte) 0x02), + EXTERNAL_FILE_INGESTION((byte) 0x03), + MANUAL_COMPACTION((byte) 0x04), + WRITE_BUFFER_MANAGER((byte) 0x05), + WRITE_BUFFER_FULL((byte) 0x06), + TEST((byte) 0x07), + DELETE_FILES((byte) 0x08), + AUTO_COMPACTION((byte) 0x09), + MANUAL_FLUSH((byte) 0x0a), + ERROR_RECOVERY((byte) 0xb); + + private final byte value; + + FlushReason(final byte value) { + this.value = value; + } + + /** + * Get the internal representation. + * + * @return the internal representation + */ + byte getValue() { + return value; + } + + /** + * Get the FlushReason from the internal representation value. + * + * @return the flush reason. + * + * @throws IllegalArgumentException if the value is unknown. + */ + static FlushReason fromValue(final byte value) { + for (final FlushReason flushReason : FlushReason.values()) { + if (flushReason.value == value) { + return flushReason; + } + } + + throw new IllegalArgumentException("Illegal value provided for FlushReason: " + value); + } +}
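(Illustrative aside, not part of the patch: a usage sketch combining FlushOptions with a manual flush. Here db stands for an already-open RocksDB handle, and flush may throw RocksDBException.)

    try (final FlushOptions flushOptions = new FlushOptions()
             .setWaitForFlush(true)         // block until the memtable is persisted
             .setAllowWriteStall(false)) {  // wait rather than stall writers
      db.flush(flushOptions);
    }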
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java new file mode 100644 index 000000000..05cc2bb90 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -0,0 +1,174 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +/** + * The config for hash linked list memtable representation. + * Such a memtable contains a fixed-size array of buckets, where + * each bucket points to a sorted singly-linked + * list (or null if the bucket is empty). + * + * Note that since this mem-table representation relies on the + * key prefix, it is required to invoke one of the usePrefixExtractor + * functions to specify how to extract key prefix given a key. + * If a proper prefix extractor is not set, then RocksDB will + * use the default memtable representation (SkipList) instead + * and post a warning in the LOG. + */ +public class HashLinkedListMemTableConfig extends MemTableConfig { + public static final long DEFAULT_BUCKET_COUNT = 50000; + public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0; + public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096; + public static final boolean + DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; + public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256; + + /** + * HashLinkedListMemTableConfig constructor + */ + public HashLinkedListMemTableConfig() { + bucketCount_ = DEFAULT_BUCKET_COUNT; + hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE; + bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES; + ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH; + thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST; + } + + /** + * Set the number of buckets in the fixed-size array used + * in the hash linked-list mem-table. + * + * @param count the number of hash buckets. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig setBucketCount( + final long count) { + bucketCount_ = count; + return this; + } + + /** + * Returns the number of buckets that will be used in the memtable + * created based on this config. + * + * @return the number of buckets + */ + public long bucketCount() { + return bucketCount_; + } + + /** + * <p>Set the size of huge tlb or allocate the hashtable bytes from + * malloc if {@code size <= 0}.</p> + * + * <p>The user needs to reserve huge pages for it to be allocated, + * like: {@code sysctl -w vm.nr_hugepages=20}</p> + * + * <p>See linux documentation/vm/hugetlbpage.txt</p>
+ * + * @param size if set to {@code <= 0}, the hashtable bytes are allocated from malloc + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig setHugePageTlbSize( + final long size) { + hugePageTlbSize_ = size; + return this; + } + + /** + * Returns the size value of hugePageTlbSize. + * + * @return the hugePageTlbSize. + */ + public long hugePageTlbSize() { + return hugePageTlbSize_; + } + + /** + * If the number of entries in one bucket exceeds this setting, log + * about it. + * + * @param threshold - number of entries in a single bucket before + * logging starts. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig + setBucketEntriesLoggingThreshold(final int threshold) { + bucketEntriesLoggingThreshold_ = threshold; + return this; + } + + /** + * Returns the maximum number of entries in one bucket before + * logging starts. + * + * @return maximum number of entries in one bucket before logging + * starts. + */ + public int bucketEntriesLoggingThreshold() { + return bucketEntriesLoggingThreshold_; + } + + /** + * If true, the distribution of the number of entries will be logged. + * + * @param logDistribution - boolean parameter indicating if number + * of entry distribution shall be logged. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig + setIfLogBucketDistWhenFlush(final boolean logDistribution) { + ifLogBucketDistWhenFlush_ = logDistribution; + return this; + } + + /** + * Returns information about logging the distribution of + * number of entries on flush. + * + * @return if distribution of number of entries shall be logged. + */ + public boolean ifLogBucketDistWhenFlush() { + return ifLogBucketDistWhenFlush_; + } + + /** + * Set the maximum number of entries in one bucket. Exceeding this value + * leads to a switch from LinkedList to SkipList. + * + * @param threshold maximum number of entries before SkipList is + * used. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig + setThresholdUseSkiplist(final int threshold) { + thresholdUseSkiplist_ = threshold; + return this; + } + + /** + * Returns entries per bucket threshold before LinkedList is + * replaced by SkipList usage for that bucket. + * + * @return entries per bucket threshold before SkipList is used. + */ + public int thresholdUseSkiplist() { + return thresholdUseSkiplist_; + } + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle(bucketCount_, hugePageTlbSize_, + bucketEntriesLoggingThreshold_, ifLogBucketDistWhenFlush_, + thresholdUseSkiplist_); + } + + private native long newMemTableFactoryHandle(long bucketCount, + long hugePageTlbSize, int bucketEntriesLoggingThreshold, + boolean ifLogBucketDistWhenFlush, int thresholdUseSkiplist) + throws IllegalArgumentException; + + private long bucketCount_; + private long hugePageTlbSize_; + private int bucketEntriesLoggingThreshold_; + private boolean ifLogBucketDistWhenFlush_; + private int thresholdUseSkiplist_; +}
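(Illustrative aside, not part of the patch: a sketch of selecting the hash linked-list memtable. The path and sizes are arbitrary; a prefix extractor is required as described above, and -- an assumption worth verifying against the native library -- concurrent memtable writes must be disabled for non-skiplist memtables. RocksDB.open may throw RocksDBException.)

    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .useFixedLengthPrefixExtractor(4)        // required by hash memtables
             .setAllowConcurrentMemtableWrite(false)  // only skiplist supports it
             .setMemTableConfig(new HashLinkedListMemTableConfig()
                 .setBucketCount(100_000)
                 .setHugePageTlbSize(0));             // 0 = allocate via malloc
         final RocksDB db = RocksDB.open(options, "/tmp/hash-memtable-example")) {
      db.put("abcd-key".getBytes(), "value".getBytes());
    }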
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java new file mode 100644 index 000000000..efc78b14e --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java @@ -0,0 +1,106 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +/** + * The config for hash skip-list mem-table representation. + * Such mem-table representation contains a fixed-size array of + * buckets, where each bucket points to a skiplist (or null if the + * bucket is empty). + * + * Note that since this mem-table representation relies on the + * key prefix, it is required to invoke one of the usePrefixExtractor + * functions to specify how to extract key prefix given a key. + * If a proper prefix extractor is not set, then RocksDB will + * use the default memtable representation (SkipList) instead + * and post a warning in the LOG. + */ +public class HashSkipListMemTableConfig extends MemTableConfig { + public static final int DEFAULT_BUCKET_COUNT = 1000000; + public static final int DEFAULT_BRANCHING_FACTOR = 4; + public static final int DEFAULT_HEIGHT = 4; + + /** + * HashSkipListMemTableConfig constructor + */ + public HashSkipListMemTableConfig() { + bucketCount_ = DEFAULT_BUCKET_COUNT; + branchingFactor_ = DEFAULT_BRANCHING_FACTOR; + height_ = DEFAULT_HEIGHT; + } + + /** + * Set the number of hash buckets used in the hash skiplist memtable. + * Default = 1000000. + * + * @param count the number of hash buckets used in the hash + * skiplist memtable. + * @return the reference to the current HashSkipListMemTableConfig. + */ + public HashSkipListMemTableConfig setBucketCount( + final long count) { + bucketCount_ = count; + return this; + } + + /** + * @return the number of hash buckets + */ + public long bucketCount() { + return bucketCount_; + } + + /** + * Set the height of the skip list. Default = 4. + * + * @param height height to set. + * + * @return the reference to the current HashSkipListMemTableConfig. + */ + public HashSkipListMemTableConfig setHeight(final int height) { + height_ = height; + return this; + } + + /** + * @return the height of the skip list. + */ + public int height() { + return height_; + } + + /** + * Set the branching factor used in the hash skip-list memtable. + * This factor controls the probabilistic size ratio between adjacent + * links in the skip list. + * + * @param bf the probabilistic size ratio between adjacent link + * lists in the skip list. + * @return the reference to the current HashSkipListMemTableConfig. + */ + public HashSkipListMemTableConfig setBranchingFactor( + final int bf) { + branchingFactor_ = bf; + return this; + } + + /** + * @return branching factor, the probabilistic size ratio between + * adjacent links in the skip list. + */ + public int branchingFactor() { + return branchingFactor_; + } + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle( + bucketCount_, height_, branchingFactor_); + } + + private native long newMemTableFactoryHandle( + long bucketCount, int height, int branchingFactor) + throws IllegalArgumentException; + + private long bucketCount_; + private int branchingFactor_; + private int height_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java new file mode 100644 index 000000000..81d890883 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java @@ -0,0 +1,75 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java
new file mode 100644
index 000000000..81d890883
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java
@@ -0,0 +1,75 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class HistogramData {
+  private final double median_;
+  private final double percentile95_;
+  private final double percentile99_;
+  private final double average_;
+  private final double standardDeviation_;
+  private final double max_;
+  private final long count_;
+  private final long sum_;
+  private final double min_;
+
+  public HistogramData(final double median, final double percentile95,
+      final double percentile99, final double average,
+      final double standardDeviation) {
+    this(median, percentile95, percentile99, average, standardDeviation, 0.0, 0, 0, 0.0);
+  }
+
+  public HistogramData(final double median, final double percentile95,
+      final double percentile99, final double average,
+      final double standardDeviation, final double max, final long count,
+      final long sum, final double min) {
+    median_ = median;
+    percentile95_ = percentile95;
+    percentile99_ = percentile99;
+    average_ = average;
+    standardDeviation_ = standardDeviation;
+    min_ = min;
+    max_ = max;
+    count_ = count;
+    sum_ = sum;
+  }
+
+  public double getMedian() {
+    return median_;
+  }
+
+  public double getPercentile95() {
+    return percentile95_;
+  }
+
+  public double getPercentile99() {
+    return percentile99_;
+  }
+
+  public double getAverage() {
+    return average_;
+  }
+
+  public double getStandardDeviation() {
+    return standardDeviation_;
+  }
+
+  public double getMax() {
+    return max_;
+  }
+
+  public long getCount() {
+    return count_;
+  }
+
+  public long getSum() {
+    return sum_;
+  }
+
+  public double getMin() {
+    return min_;
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java
new file mode 100644
index 000000000..d5f7da5e0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java
@@ -0,0 +1,221 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum HistogramType {
+
+  DB_GET((byte) 0x0),
+
+  DB_WRITE((byte) 0x1),
+
+  COMPACTION_TIME((byte) 0x2),
+
+  SUBCOMPACTION_SETUP_TIME((byte) 0x3),
+
+  TABLE_SYNC_MICROS((byte) 0x4),
+
+  COMPACTION_OUTFILE_SYNC_MICROS((byte) 0x5),
+
+  WAL_FILE_SYNC_MICROS((byte) 0x6),
+
+  MANIFEST_FILE_SYNC_MICROS((byte) 0x7),
+
+  /**
+   * Time spent in IO during table open.
+   */
+  TABLE_OPEN_IO_MICROS((byte) 0x8),
+
+  DB_MULTIGET((byte) 0x9),
+
+  READ_BLOCK_COMPACTION_MICROS((byte) 0xA),
+
+  READ_BLOCK_GET_MICROS((byte) 0xB),
+
+  WRITE_RAW_BLOCK_MICROS((byte) 0xC),
+
+  STALL_L0_SLOWDOWN_COUNT((byte) 0xD),
+
+  STALL_MEMTABLE_COMPACTION_COUNT((byte) 0xE),
+
+  STALL_L0_NUM_FILES_COUNT((byte) 0xF),
+
+  HARD_RATE_LIMIT_DELAY_COUNT((byte) 0x10),
+
+  SOFT_RATE_LIMIT_DELAY_COUNT((byte) 0x11),
+
+  NUM_FILES_IN_SINGLE_COMPACTION((byte) 0x12),
+
+  DB_SEEK((byte) 0x13),
+
+  WRITE_STALL((byte) 0x14),
+
+  SST_READ_MICROS((byte) 0x15),
+
+  /**
+   * The number of subcompactions actually scheduled during a compaction.
+   */
+  NUM_SUBCOMPACTIONS_SCHEDULED((byte) 0x16),
+
+  /**
+   * Value size distribution in each operation.
+   */
+  BYTES_PER_READ((byte) 0x17),
+  BYTES_PER_WRITE((byte) 0x18),
+  BYTES_PER_MULTIGET((byte) 0x19),
+
+  /**
+   * Number of bytes compressed. The recorded value is the uncompressed
+   * size, i.e. the input of compression.
+   */
+  BYTES_COMPRESSED((byte) 0x1A),
+
+  /**
+   * Number of bytes decompressed. The recorded value is the uncompressed
+   * size, i.e. the output of decompression.
+   */
+  BYTES_DECOMPRESSED((byte) 0x1B),
+
+  COMPRESSION_TIMES_NANOS((byte) 0x1C),
+
+  DECOMPRESSION_TIMES_NANOS((byte) 0x1D),
+
+  READ_NUM_MERGE_OPERANDS((byte) 0x1E),
+
+  /**
+   * Time spent flushing memtable to disk.
+   */
+  FLUSH_TIME((byte) 0x20),
+
+  /**
+   * Size of keys written to BlobDB.
+   */
+  BLOB_DB_KEY_SIZE((byte) 0x21),
+
+  /**
+   * Size of values written to BlobDB.
+   */
+  BLOB_DB_VALUE_SIZE((byte) 0x22),
+
+  /**
+   * BlobDB Put/PutWithTTL/PutUntil/Write latency.
+   */
+  BLOB_DB_WRITE_MICROS((byte) 0x23),
+
+  /**
+   * BlobDB Get latency.
+   */
+  BLOB_DB_GET_MICROS((byte) 0x24),
+
+  /**
+   * BlobDB MultiGet latency.
+   */
+  BLOB_DB_MULTIGET_MICROS((byte) 0x25),
+
+  /**
+   * BlobDB Seek/SeekToFirst/SeekToLast/SeekForPrev latency.
+   */
+  BLOB_DB_SEEK_MICROS((byte) 0x26),
+
+  /**
+   * BlobDB Next latency.
+   */
+  BLOB_DB_NEXT_MICROS((byte) 0x27),
+
+  /**
+   * BlobDB Prev latency.
+   */
+  BLOB_DB_PREV_MICROS((byte) 0x28),
+
+  /**
+   * Blob file write latency.
+   */
+  BLOB_DB_BLOB_FILE_WRITE_MICROS((byte) 0x29),
+
+  /**
+   * Blob file read latency.
+   */
+  BLOB_DB_BLOB_FILE_READ_MICROS((byte) 0x2A),
+
+  /**
+   * Blob file sync latency.
+   */
+  BLOB_DB_BLOB_FILE_SYNC_MICROS((byte) 0x2B),
+
+  /**
+   * BlobDB garbage collection time.
+   */
+  BLOB_DB_GC_MICROS((byte) 0x2C),
+
+  /**
+   * BlobDB compression time.
+   */
+  BLOB_DB_COMPRESSION_MICROS((byte) 0x2D),
+
+  /**
+   * BlobDB decompression time.
+   */
+  BLOB_DB_DECOMPRESSION_MICROS((byte) 0x2E),
+
+  /**
+   * Number of Index and Filter blocks read from file system per level in a
+   * MultiGet request.
+   */
+  NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL((byte) 0x2F),
+
+  /**
+   * Number of Data blocks read from file system per level in a MultiGet
+   * request.
+   */
+  NUM_DATA_BLOCKS_READ_PER_LEVEL((byte) 0x30),
+
+  /**
+   * Number of SST files read from file system per level in a MultiGet
+   * request.
+   */
+  NUM_SST_READ_PER_LEVEL((byte) 0x31),
+
+  /**
+   * The number of retries during auto resume.
+   */
+  ERROR_HANDLER_AUTORESUME_RETRY_COUNT((byte) 0x32),
+
+  ASYNC_READ_BYTES((byte) 0x33),
+
+  // 0x1F for backwards compatibility on current minor version.
+  HISTOGRAM_ENUM_MAX((byte) 0x1F);
+
+  private final byte value;
+
+  HistogramType(final byte value) {
+    this.value = value;
+  }
+
+  /**
+   * Returns the byte value of the enumeration's value
+   *
+   * @return byte representation
+   */
+  public byte getValue() {
+    return value;
+  }
+
+  /**
+   * Get Histogram type by byte value.
+   *
+   * @param value byte representation of HistogramType.
+   *
+   * @return {@link org.rocksdb.HistogramType} instance.
+   * @throws java.lang.IllegalArgumentException if an invalid
+   *     value is provided.
+   */
+  public static HistogramType getHistogramType(final byte value) {
+    for (final HistogramType histogramType : HistogramType.values()) {
+      if (histogramType.getValue() == value) {
+        return histogramType;
+      }
+    }
+    throw new IllegalArgumentException(
+        "Illegal value provided for HistogramType.");
+  }
+}
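A short sketch of how the two classes above are typically consumed together (illustrative only; assumes a database opened with statistics enabled and that setStatistics is available on this Options build):

    final Statistics statistics = new Statistics();
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setStatistics(statistics);
         final RocksDB db = RocksDB.open(options, "/tmp/stats-db")) {
      db.put("k".getBytes(), "v".getBytes());
      db.get("k".getBytes());
      final HistogramData get = statistics.getHistogramData(HistogramType.DB_GET);
      System.out.println("p95 Get micros: " + get.getPercentile95());
    }
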
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Holder.java b/src/rocksdb/java/src/main/java/org/rocksdb/Holder.java
new file mode 100644
index 000000000..716a0bda0
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Holder.java
@@ -0,0 +1,46 @@
+// Copyright (c) 2016, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Simple instance reference wrapper.
+ */
+public class Holder<T> {
+  private /* @Nullable */ T value;
+
+  /**
+   * Constructs a new Holder with null instance.
+   */
+  public Holder() {
+  }
+
+  /**
+   * Constructs a new Holder.
+   *
+   * @param value the instance or null
+   */
+  public Holder(/* @Nullable */ final T value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the instance reference.
+   *
+   * @return the instance reference or null
+   */
+  public /* @Nullable */ T getValue() {
+    return value;
+  }
+
+  /**
+   * Set the instance reference.
+   *
+   * @param value the instance reference or null
+   */
+  public void setValue(/* @Nullable */ final T value) {
+    this.value = value;
+  }
+}
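Holder is used, for example, by RocksDB#keyMayExist to return a value alongside a boolean result. A hedged sketch (assumes an open RocksDB handle named db):

    final Holder<byte[]> valueHolder = new Holder<>();
    if (db.keyMayExist("k".getBytes(), valueHolder)) {
      if (valueHolder.getValue() != null) {
        System.out.println("value: " + new String(valueHolder.getValue()));
      } else {
        // the key may exist, but no value was retrieved by the filter check
      }
    }
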
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/IndexShorteningMode.java b/src/rocksdb/java/src/main/java/org/rocksdb/IndexShorteningMode.java
new file mode 100644
index 000000000..a68346c38
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/IndexShorteningMode.java
@@ -0,0 +1,60 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * This enum allows trading off increased index size for improved iterator
+ * seek performance in some situations, particularly when block cache is
+ * disabled ({@link ReadOptions#fillCache()} == false) and direct IO is
+ * enabled ({@link DBOptions#useDirectReads()} == true).
+ * The default mode is the best tradeoff for most use cases.
+ * This option only affects newly written tables.
+ *
+ * The index contains a key separating each pair of consecutive blocks.
+ * Let A be the highest key in one block, B the lowest key in the next block,
+ * and I the index entry separating these two blocks:
+ * [ ... A] I [B ...]
+ * I is allowed to be anywhere in [A, B).
+ * If an iterator is seeked to a key in (A, I], we'll unnecessarily read the
+ * first block, then immediately fall through to the second block.
+ * However, if I=A, this can't happen, and we'll read only the second block.
+ * In kNoShortening mode, we use I=A. In other modes, we use the shortest
+ * key in [A, B), which usually significantly reduces index size.
+ *
+ * There's a similar story for the last index entry, which is an upper bound
+ * of the highest key in the file. If it's shortened and therefore
+ * overestimated, the iterator is likely to unnecessarily read the last data
+ * block from each file on each seek.
+ */
+public enum IndexShorteningMode {
+  /**
+   * Use full keys.
+   */
+  kNoShortening((byte) 0),
+  /**
+   * Shorten index keys between blocks, but use full key for the last index
+   * key, which is the upper bound of the whole file.
+   */
+  kShortenSeparators((byte) 1),
+  /**
+   * Shorten both keys between blocks and key after last block.
+   */
+  kShortenSeparatorsAndSuccessor((byte) 2);
+
+  private final byte value;
+
+  IndexShorteningMode(final byte value) {
+    this.value = value;
+  }
+
+  /**
+   * Returns the byte value of the enumeration's value.
+   *
+   * @return byte representation
+   */
+  byte getValue() {
+    return value;
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java b/src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java
new file mode 100644
index 000000000..162edad1b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/IndexType.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * IndexType used in conjunction with BlockBasedTable.
+ */
+public enum IndexType {
+  /**
+   * A space efficient index block that is optimized for
+   * binary-search-based index.
+   */
+  kBinarySearch((byte) 0),
+  /**
+   * The hash index, if enabled, will do the hash lookup when
+   * {@code Options.prefix_extractor} is provided.
+   */
+  kHashSearch((byte) 1),
+  /**
+   * A two-level index implementation. Both levels are binary search indexes.
+   */
+  kTwoLevelIndexSearch((byte) 2),
+  /**
+   * Like {@link #kBinarySearch}, but the index also contains the first key of
+   * each block. This allows iterators to defer reading the block until it's
+   * actually needed. May significantly reduce read amplification of short
+   * range scans. Without it, iterator seek usually reads one block from each
+   * level-0 file and from each level, which may be expensive.
+   * Works best in combination with:
+   * - IndexShorteningMode::kNoShortening,
+   * - custom FlushBlockPolicy to cut blocks at some meaningful boundaries,
+   *   e.g. when prefix changes.
+   * Makes the index significantly bigger (2x or more), especially when keys
+   * are long.
+   */
+  kBinarySearchWithFirstKey((byte) 3);
+
+  /**
+   * Returns the byte value of the enumeration's value
+   *
+   * @return byte representation
+   */
+  public byte getValue() {
+    return value_;
+  }
+
+  IndexType(final byte value) {
+    value_ = value;
+  }
+
+  private final byte value_;
+}
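A sketch combining the two options above on a block-based table, pairing kBinarySearchWithFirstKey with kNoShortening as the IndexType Javadoc suggests (illustrative only; assumes the fluent setIndexType/setIndexShortening builder methods of this BlockBasedTableConfig version):

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setIndexType(IndexType.kBinarySearchWithFirstKey)
        .setIndexShortening(IndexShorteningMode.kNoShortening);
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setTableFormatConfig(tableConfig)) {
      // open RocksDB with these options ...
    }
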
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java b/src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java
new file mode 100644
index 000000000..b7c0f0700
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java
@@ -0,0 +1,49 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb;
+
+/**
+ * RocksDB log levels.
+ */
+public enum InfoLogLevel {
+  DEBUG_LEVEL((byte)0),
+  INFO_LEVEL((byte)1),
+  WARN_LEVEL((byte)2),
+  ERROR_LEVEL((byte)3),
+  FATAL_LEVEL((byte)4),
+  HEADER_LEVEL((byte)5),
+  NUM_INFO_LOG_LEVELS((byte)6);
+
+  private final byte value_;
+
+  InfoLogLevel(final byte value) {
+    value_ = value;
+  }
+
+  /**
+   * Returns the byte value of the enumeration's value
+   *
+   * @return byte representation
+   */
+  public byte getValue() {
+    return value_;
+  }
+
+  /**
+   * Get InfoLogLevel by byte value.
+   *
+   * @param value byte representation of InfoLogLevel.
+   *
+   * @return {@link org.rocksdb.InfoLogLevel} instance.
+   * @throws java.lang.IllegalArgumentException if an invalid
+   *     value is provided.
+   */
+  public static InfoLogLevel getInfoLogLevel(final byte value) {
+    for (final InfoLogLevel infoLogLevel : InfoLogLevel.values()) {
+      if (infoLogLevel.getValue() == value) {
+        return infoLogLevel;
+      }
+    }
+    throw new IllegalArgumentException(
+        "Illegal value provided for InfoLogLevel.");
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
new file mode 100644
index 000000000..a6a308daa
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/IngestExternalFileOptions.java
@@ -0,0 +1,227 @@
+package org.rocksdb;
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+import java.util.List;
+
+/**
+ * IngestExternalFileOptions is used by
+ * {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}.
+ */
+public class IngestExternalFileOptions extends RocksObject {
+
+  public IngestExternalFileOptions() {
+    super(newIngestExternalFileOptions());
+  }
+
+  /**
+   * @param moveFiles {@link #setMoveFiles(boolean)}
+   * @param snapshotConsistency {@link #setSnapshotConsistency(boolean)}
+   * @param allowGlobalSeqNo {@link #setAllowGlobalSeqNo(boolean)}
+   * @param allowBlockingFlush {@link #setAllowBlockingFlush(boolean)}
+   */
+  public IngestExternalFileOptions(final boolean moveFiles,
+      final boolean snapshotConsistency, final boolean allowGlobalSeqNo,
+      final boolean allowBlockingFlush) {
+    super(newIngestExternalFileOptions(moveFiles, snapshotConsistency,
+        allowGlobalSeqNo, allowBlockingFlush));
+  }
+
+  /**
+   * Can be set to true to move the files instead of copying them.
+   *
+   * @return true if files will be moved
+   */
+  public boolean moveFiles() {
+    return moveFiles(nativeHandle_);
+  }
+
+  /**
+   * Can be set to true to move the files instead of copying them.
+   *
+   * @param moveFiles true if files should be moved instead of copied
+   *
+   * @return the reference to the current IngestExternalFileOptions.
+   */
+  public IngestExternalFileOptions setMoveFiles(final boolean moveFiles) {
+    setMoveFiles(nativeHandle_, moveFiles);
+    return this;
+  }
+
+  /**
+   * If set to false, keys of an ingested file could appear in existing
+   * snapshots that were created before the file was ingested.
+   *
+   * @return true if snapshot consistency is assured
+   */
+  public boolean snapshotConsistency() {
+    return snapshotConsistency(nativeHandle_);
+  }
+
+  /**
+   * If set to false, keys of an ingested file could appear in existing
+   * snapshots that were created before the file was ingested.
+   *
+   * @param snapshotConsistency true if snapshot consistency is required
+   *
+   * @return the reference to the current IngestExternalFileOptions.
+   */
+  public IngestExternalFileOptions setSnapshotConsistency(
+      final boolean snapshotConsistency) {
+    setSnapshotConsistency(nativeHandle_, snapshotConsistency);
+    return this;
+  }
+
+  /**
+   * If set to false, {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
+   * will fail if the file key range overlaps with existing keys or tombstones in the DB.
+   *
+   * @return true if global seq numbers are assured
+   */
+  public boolean allowGlobalSeqNo() {
+    return allowGlobalSeqNo(nativeHandle_);
+  }
+
+  /**
+   * If set to false, {@link RocksDB#ingestExternalFile(ColumnFamilyHandle, List, IngestExternalFileOptions)}
+   * will fail if the file key range overlaps with existing keys or tombstones in the DB.
+   *
+   * @param allowGlobalSeqNo true if global seq numbers are required
+   *
+   * @return the reference to the current IngestExternalFileOptions.
+   */
+  public IngestExternalFileOptions setAllowGlobalSeqNo(
+      final boolean allowGlobalSeqNo) {
+    setAllowGlobalSeqNo(nativeHandle_, allowGlobalSeqNo);
+    return this;
+  }
+
+  /**
+   * If set to false and the file key range overlaps with the memtable key range
+   * (memtable flush required), IngestExternalFile will fail.
+   *
+   * @return true if blocking flushes may occur
+   */
+  public boolean allowBlockingFlush() {
+    return allowBlockingFlush(nativeHandle_);
+  }
+
+  /**
+   * If set to false and the file key range overlaps with the memtable key range
+   * (memtable flush required), IngestExternalFile will fail.
+   *
+   * @param allowBlockingFlush true if blocking flushes are allowed
+   *
+   * @return the reference to the current IngestExternalFileOptions.
+   */
+  public IngestExternalFileOptions setAllowBlockingFlush(
+      final boolean allowBlockingFlush) {
+    setAllowBlockingFlush(nativeHandle_, allowBlockingFlush);
+    return this;
+  }
+
+  /**
+   * Returns true if duplicate keys in the file being ingested are
+   * to be skipped rather than overwriting existing data under that key.
+   *
+   * @return true if duplicate keys in the file being ingested are to be
+   *     skipped, false otherwise.
+   */
+  public boolean ingestBehind() {
+    return ingestBehind(nativeHandle_);
+  }
+
+  /**
+   * Set to true if you would like duplicate keys in the file being ingested
+   * to be skipped rather than overwriting existing data under that key.
+   *
+   * Use case: back-filling historical data into the database without
+   * overwriting an existing newer version of the data.
+   *
+   * This option can only be used if the DB has been running
+   * with DBOptions#allowIngestBehind() == true since the dawn of time.
+   *
+   * All files will be ingested at the bottommost level with seqno=0.
+   *
+   * Default: false
+   *
+   * @param ingestBehind true if you would like duplicate keys in the file being
+   *     ingested to be skipped.
+   *
+   * @return the reference to the current IngestExternalFileOptions.
+   */
+  public IngestExternalFileOptions setIngestBehind(final boolean ingestBehind) {
+    setIngestBehind(nativeHandle_, ingestBehind);
+    return this;
+  }
+
+  /**
+   * Returns true if the global_seqno is written to a given offset
+   * in the external SST file for backward compatibility.
+   *
+   * See {@link #setWriteGlobalSeqno(boolean)}.
+   *
+   * @return true if the global_seqno is written to a given offset,
+   *     false otherwise.
+   */
+  public boolean writeGlobalSeqno() {
+    return writeGlobalSeqno(nativeHandle_);
+  }
+
+  /**
+   * Set to true if you would like to write the global_seqno to a given offset
+   * in the external SST file for backward compatibility.
+   *
+   * Older versions of RocksDB write the global_seqno to a given offset within
+   * the ingested SST files, and new versions of RocksDB do not.
+   *
+   * If you ingest an external SST using a new version of RocksDB and would
+   * like to be able to downgrade to an older version of RocksDB, you should
+   * set {@link #writeGlobalSeqno()} to true.
+   *
+   * If your service is just starting to use the new RocksDB, we recommend that
+   * you set this option to false, which brings two benefits:
+   * 1. No extra random write for global_seqno during ingestion.
+   * 2. The external SST file is left unmodified, so its checksum can still
+   *    be verified.
+   *
+   * We have a plan to set this option to false by default in the future.
+   *
+   * Default: true
+   *
+   * @param writeGlobalSeqno true to write the global_seqno to a given offset,
+   *     false otherwise
+   *
+   * @return the reference to the current IngestExternalFileOptions.
+   */
+  public IngestExternalFileOptions setWriteGlobalSeqno(
+      final boolean writeGlobalSeqno) {
+    setWriteGlobalSeqno(nativeHandle_, writeGlobalSeqno);
+    return this;
+  }
+
+  private static native long newIngestExternalFileOptions();
+  private static native long newIngestExternalFileOptions(
+      final boolean moveFiles, final boolean snapshotConsistency,
+      final boolean allowGlobalSeqNo, final boolean allowBlockingFlush);
+  @Override protected final native void disposeInternal(final long handle);
+
+  private native boolean moveFiles(final long handle);
+  private native void setMoveFiles(final long handle, final boolean moveFiles);
+  private native boolean snapshotConsistency(final long handle);
+  private native void setSnapshotConsistency(final long handle,
+      final boolean snapshotConsistency);
+  private native boolean allowGlobalSeqNo(final long handle);
+  private native void setAllowGlobalSeqNo(final long handle,
+      final boolean allowGlobalSeqNo);
+  private native boolean allowBlockingFlush(final long handle);
+  private native void setAllowBlockingFlush(final long handle,
+      final boolean allowBlockingFlush);
+  private native boolean ingestBehind(final long handle);
+  private native void setIngestBehind(final long handle,
+      final boolean ingestBehind);
+  private native boolean writeGlobalSeqno(final long handle);
+  private native void setWriteGlobalSeqno(final long handle,
+      final boolean writeGlobalSeqNo);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/KeyMayExist.java b/src/rocksdb/java/src/main/java/org/rocksdb/KeyMayExist.java
new file mode 100644
index 000000000..36185d8c9
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/KeyMayExist.java
@@ -0,0 +1,36 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Objects;
+
+public class KeyMayExist {
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o)
+      return true;
+    if (o == null || getClass() != o.getClass())
+      return false;
+    final KeyMayExist that = (KeyMayExist) o;
+    return (valueLength == that.valueLength && exists == that.exists);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(exists, valueLength);
+  }
+
+  public enum KeyMayExistEnum { kNotExist, kExistsWithoutValue, kExistsWithValue }
+
+  public KeyMayExist(final KeyMayExistEnum exists, final int valueLength) {
+    this.exists = exists;
+    this.valueLength = valueLength;
+  }
+
+  public final KeyMayExistEnum exists;
+  public final int valueLength;
+}
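Stepping back to the ingestion options defined above, an end-to-end sketch of writing an external SST file and ingesting it (illustrative only; paths are made up, db is an open handle, and java.util.Collections is assumed imported):

    try (final EnvOptions envOptions = new EnvOptions();
         final Options options = new Options();
         final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
      writer.open("/tmp/data.sst");
      writer.put("key1".getBytes(), "value1".getBytes());
      writer.finish();
    }
    try (final IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()
             .setMoveFiles(true)
             .setSnapshotConsistency(true)) {
      db.ingestExternalFile(Collections.singletonList("/tmp/data.sst"), ingestOptions);
    }
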
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java b/src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java
new file mode 100644
index 000000000..db90b17c5
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LRUCache.java
@@ -0,0 +1,106 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Least Recently Used Cache
+ */
+public class LRUCache extends Cache {
+
+  /**
+   * Create a new cache with a fixed size capacity
+   *
+   * @param capacity The fixed size capacity of the cache
+   */
+  public LRUCache(final long capacity) {
+    this(capacity, -1, false, 0.0, 0.0);
+  }
+
+  /**
+   * Create a new cache with a fixed size capacity. The cache is sharded
+   * to 2^numShardBits shards, by hash of the key. The total capacity
+   * is divided and evenly assigned to each shard.
+   * numShardBits = -1 means it is automatically determined: every shard
+   * will be at least 512KB and the number of shard bits will not exceed 6.
+   *
+   * @param capacity The fixed size capacity of the cache
+   * @param numShardBits The cache is sharded to 2^numShardBits shards,
+   *     by hash of the key
+   */
+  public LRUCache(final long capacity, final int numShardBits) {
+    super(newLRUCache(capacity, numShardBits, false, 0.0, 0.0));
+  }
+
+  /**
+   * Create a new cache with a fixed size capacity. The cache is sharded
+   * to 2^numShardBits shards, by hash of the key. The total capacity
+   * is divided and evenly assigned to each shard. If strictCapacityLimit
+   * is set, inserts into the cache will fail when the cache is full.
+   * numShardBits = -1 means it is automatically determined: every shard
+   * will be at least 512KB and the number of shard bits will not exceed 6.
+   *
+   * @param capacity The fixed size capacity of the cache
+   * @param numShardBits The cache is sharded to 2^numShardBits shards,
+   *     by hash of the key
+   * @param strictCapacityLimit if true, inserts into the cache fail when
+   *     the cache is full
+   */
+  public LRUCache(final long capacity, final int numShardBits,
+      final boolean strictCapacityLimit) {
+    super(newLRUCache(capacity, numShardBits, strictCapacityLimit, 0.0, 0.0));
+  }
+
+  /**
+   * Create a new cache with a fixed size capacity. The cache is sharded
+   * to 2^numShardBits shards, by hash of the key. The total capacity
+   * is divided and evenly assigned to each shard. If strictCapacityLimit
+   * is set, inserts into the cache will fail when the cache is full. The
+   * user can also reserve a percentage of the cache for high-priority
+   * entries via highPriPoolRatio.
+   * numShardBits = -1 means it is automatically determined: every shard
+   * will be at least 512KB and the number of shard bits will not exceed 6.
+   *
+   * @param capacity The fixed size capacity of the cache
+   * @param numShardBits The cache is sharded to 2^numShardBits shards,
+   *     by hash of the key
+   * @param strictCapacityLimit if true, inserts into the cache fail when
+   *     the cache is full
+   * @param highPriPoolRatio percentage of the cache reserved for high-priority
+   *     entries
+   */
+  public LRUCache(final long capacity, final int numShardBits, final boolean strictCapacityLimit,
+      final double highPriPoolRatio) {
+    super(newLRUCache(capacity, numShardBits, strictCapacityLimit, highPriPoolRatio, 0.0));
+  }
+
+  /**
+   * Create a new cache with a fixed size capacity. The cache is sharded
+   * to 2^numShardBits shards, by hash of the key. The total capacity
+   * is divided and evenly assigned to each shard. If strictCapacityLimit
+   * is set, inserts into the cache will fail when the cache is full. The
+   * user can also reserve a percentage of the cache for high-priority and
+   * low-priority entries via highPriPoolRatio and lowPriPoolRatio.
+   * numShardBits = -1 means it is automatically determined: every shard
+   * will be at least 512KB and the number of shard bits will not exceed 6.
+   *
+   * @param capacity The fixed size capacity of the cache
+   * @param numShardBits The cache is sharded to 2^numShardBits shards,
+   *     by hash of the key
+   * @param strictCapacityLimit if true, inserts into the cache fail when
+   *     the cache is full
+   * @param highPriPoolRatio percentage of the cache reserved for high-priority
+   *     entries
+   * @param lowPriPoolRatio percentage of the cache reserved for low-priority
+   *     entries
+   */
+  public LRUCache(final long capacity, final int numShardBits, final boolean strictCapacityLimit,
+      final double highPriPoolRatio, final double lowPriPoolRatio) {
+    super(newLRUCache(
+        capacity, numShardBits, strictCapacityLimit, highPriPoolRatio, lowPriPoolRatio));
+  }
+
+  private static native long newLRUCache(final long capacity, final int numShardBits,
+      final boolean strictCapacityLimit, final double highPriPoolRatio,
+      final double lowPriPoolRatio);
+  @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java
new file mode 100644
index 000000000..c5685098b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LevelMetaData.java
@@ -0,0 +1,56 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * The metadata that describes a level.
+ */
+public class LevelMetaData {
+  private final int level;
+  private final long size;
+  private final SstFileMetaData[] files;
+
+  /**
+   * Called from JNI C++
+   */
+  private LevelMetaData(final int level, final long size,
+      final SstFileMetaData[] files) {
+    this.level = level;
+    this.size = size;
+    this.files = files;
+  }
+
+  /**
+   * The level which this meta data describes.
+   *
+   * @return the level
+   */
+  public int level() {
+    return level;
+  }
+
+  /**
+   * The size of this level in bytes, which is equal to the sum of
+   * the file size of its {@link #files()}.
+   *
+   * @return the size
+   */
+  public long size() {
+    return size;
+  }
+
+  /**
+   * The metadata of all sst files in this level.
+   *
+   * @return the metadata of the files
+   */
+  public List<SstFileMetaData> files() {
+    return Arrays.asList(files);
+  }
+}
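A configuration sketch tying the LRUCache above to a table format (illustrative sizes; assumes BlockBasedTableConfig#setBlockCache is available in this version):

    final Cache blockCache = new LRUCache(
        256 * 1024 * 1024L, // 256 MiB capacity
        6,                  // 2^6 = 64 shards
        false,              // no strict capacity limit
        0.5);               // reserve half of the cache for high-priority entries
    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setBlockCache(blockCache);
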
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java
new file mode 100644
index 000000000..35d883e18
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LiveFileMetaData.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The full set of metadata associated with each SST file.
+ */
+public class LiveFileMetaData extends SstFileMetaData {
+  private final byte[] columnFamilyName;
+  private final int level;
+
+  /**
+   * Called from JNI C++
+   */
+  private LiveFileMetaData(
+      final byte[] columnFamilyName,
+      final int level,
+      final String fileName,
+      final String path,
+      final long size,
+      final long smallestSeqno,
+      final long largestSeqno,
+      final byte[] smallestKey,
+      final byte[] largestKey,
+      final long numReadsSampled,
+      final boolean beingCompacted,
+      final long numEntries,
+      final long numDeletions) {
+    super(fileName, path, size, smallestSeqno, largestSeqno, smallestKey,
+        largestKey, numReadsSampled, beingCompacted, numEntries, numDeletions);
+    this.columnFamilyName = columnFamilyName;
+    this.level = level;
+  }
+
+  /**
+   * Get the name of the column family.
+   *
+   * @return the name of the column family
+   */
+  public byte[] columnFamilyName() {
+    return columnFamilyName;
+  }
+
+  /**
+   * Get the level at which this file resides.
+   *
+   * @return the level at which the file resides.
+   */
+  public int level() {
+    return level;
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java b/src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java
new file mode 100644
index 000000000..ef24a6427
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/LogFile.java
@@ -0,0 +1,75 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public class LogFile {
+  private final String pathName;
+  private final long logNumber;
+  private final WalFileType type;
+  private final long startSequence;
+  private final long sizeFileBytes;
+
+  /**
+   * Called from JNI C++
+   */
+  private LogFile(final String pathName, final long logNumber,
+      final byte walFileTypeValue, final long startSequence,
+      final long sizeFileBytes) {
+    this.pathName = pathName;
+    this.logNumber = logNumber;
+    this.type = WalFileType.fromValue(walFileTypeValue);
+    this.startSequence = startSequence;
+    this.sizeFileBytes = sizeFileBytes;
+  }
+
+  /**
+   * Returns the log file's pathname relative to the main db dir.
+   * E.g. for a live-log-file: /000003.log;
+   * for an archived-log-file: /archive/000003.log
+   *
+   * @return log file's pathname
+   */
+  public String pathName() {
+    return pathName;
+  }
+
+  /**
+   * Primary identifier for the log file.
+   * This is directly proportional to the creation time of the log file
+   *
+   * @return the log number
+   */
+  public long logNumber() {
+    return logNumber;
+  }
+
+  /**
+   * Log file can be either alive or archived.
+   *
+   * @return the type of the log file.
+   */
+  public WalFileType type() {
+    return type;
+  }
+
+  /**
+   * Starting sequence number of writebatch written in this log file.
+   *
+   * @return the starting sequence number
+   */
+  public long startSequence() {
+    return startSequence;
+  }
+
+  /**
+   * Size of the log file on disk in bytes.
+   *
+   * @return size of log file
+   */
+  public long sizeFileBytes() {
+    return sizeFileBytes;
+  }
+}
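A sketch that walks the file metadata classes above for an open database (assumes an open RocksDB handle named db):

    for (final LiveFileMetaData meta : db.getLiveFilesMetaData()) {
      System.out.println("cf=" + new String(meta.columnFamilyName())
          + " level=" + meta.level()
          + " file=" + meta.fileName()
          + " size=" + meta.size());
    }
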
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Logger.java b/src/rocksdb/java/src/main/java/org/rocksdb/Logger.java
new file mode 100644
index 000000000..00a5d5674
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/Logger.java
@@ -0,0 +1,122 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * <p>This class provides a custom logger functionality
+ * in Java which wraps {@code RocksDB} logging facilities.
+ * </p>
+ *
+ * <p>Using this class RocksDB can log with common
+ * Java logging APIs like Log4j or Slf4j without keeping
+ * database logs in the filesystem.</p>
+ *
+ * <strong>Performance</strong>
+ * <p>There are certain performance penalties using a Java
+ * {@code Logger} implementation within production code.
+ * </p>
+ *
+ * <p>
+ * A log level can be set using {@link org.rocksdb.Options} or
+ * {@link Logger#setInfoLogLevel(InfoLogLevel)}. The set log level
+ * influences the underlying native code. Each log message is
+ * checked against the set log level and if the log level is more
+ * verbose than the set log level, native allocations will be made
+ * and data structures are allocated.
+ * </p>
+ *
+ * <p>Every log message which will be emitted by native code will
+ * trigger expensive native to Java transitions. So the preferred
+ * setting for production use is either
+ * {@link org.rocksdb.InfoLogLevel#ERROR_LEVEL} or
+ * {@link org.rocksdb.InfoLogLevel#FATAL_LEVEL}.
+ * </p>
+ */
+public abstract class Logger extends RocksCallbackObject {
+
+  private static final long WITH_OPTIONS = 0;
+  private static final long WITH_DBOPTIONS = 1;
+
+  /**
+   * <p>AbstractLogger constructor.</p>
+   *
+   * <p><strong>Important:</strong> the log level set within
+   * the {@link org.rocksdb.Options} instance will be used as
+   * maximum log level of RocksDB.</p>
+   *
+   * @param options {@link org.rocksdb.Options} instance.
+   */
+  public Logger(final Options options) {
+    super(options.nativeHandle_, WITH_OPTIONS);
+  }
+
+  /**
+   * <p>AbstractLogger constructor.</p>
+   *
+   * <p><strong>Important:</strong> the log level set within
+   * the {@link org.rocksdb.DBOptions} instance will be used
+   * as maximum log level of RocksDB.</p>
+   *
+   * @param dboptions {@link org.rocksdb.DBOptions} instance.
+   */
+  public Logger(final DBOptions dboptions) {
+    super(dboptions.nativeHandle_, WITH_DBOPTIONS);
+  }
+
+  @Override
+  protected long initializeNative(final long... nativeParameterHandles) {
+    if (nativeParameterHandles[1] == WITH_OPTIONS) {
+      return createNewLoggerOptions(nativeParameterHandles[0]);
+    } else if (nativeParameterHandles[1] == WITH_DBOPTIONS) {
+      return createNewLoggerDbOptions(nativeParameterHandles[0]);
+    } else {
+      throw new IllegalArgumentException();
+    }
+  }
+
+  /**
+   * Set the {@link org.rocksdb.InfoLogLevel} of this Logger.
+   *
+   * @param infoLogLevel {@link org.rocksdb.InfoLogLevel} instance.
+   */
+  public void setInfoLogLevel(final InfoLogLevel infoLogLevel) {
+    setInfoLogLevel(nativeHandle_, infoLogLevel.getValue());
+  }
+
+  /**
+   * Return the logger's log level.
+   *
+   * @return {@link org.rocksdb.InfoLogLevel} instance.
+   */
+  public InfoLogLevel infoLogLevel() {
+    return InfoLogLevel.getInfoLogLevel(
+        infoLogLevel(nativeHandle_));
+  }
+
+  protected abstract void log(InfoLogLevel infoLogLevel,
+      String logMsg);
+
+  protected native long createNewLoggerOptions(
+      long options);
+  protected native long createNewLoggerDbOptions(
+      long dbOptions);
+  protected native void setInfoLogLevel(long handle,
+      byte infoLogLevel);
+  protected native byte infoLogLevel(long handle);
+
+  /**
+   * We override {@link RocksCallbackObject#disposeInternal()}
+   * as disposing of a rocksdb::LoggerJniCallback requires
+   * a slightly different approach as it is a std::shared_ptr.
+   */
+  @Override
+  protected void disposeInternal() {
+    disposeInternal(nativeHandle_);
+  }
+
+  private native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java
new file mode 100644
index 000000000..83cee974a
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java
@@ -0,0 +1,29 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+/**
+ * MemTableConfig is used to configure the internal mem-table of a
+ * RocksDB instance. Each memtable representation is required to have
+ * such a sub-class so that Java developers can use it.
+ *
+ * To make RocksDB use a specific memtable format, set the associated
+ * MemTableConfig and pass it into Options via
+ * Options.setMemTableConfig(), then open the db using that Options.
+ *
+ * @see Options
+ */
+public abstract class MemTableConfig {
+  /**
+   * This function should only be called by Options.setMemTableConfig(),
+   * which will create a c++ shared-pointer to the c++ MemTableRepFactory
+   * associated with the Java MemTableConfig.
+   *
+   * @see Options#setMemTableConfig(MemTableConfig)
+   *
+   * @return native handle address to the native memtable instance.
+   */
+  protected abstract long newMemTableFactoryHandle();
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MemTableInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/MemTableInfo.java
new file mode 100644
index 000000000..f4fb577c3
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MemTableInfo.java
@@ -0,0 +1,103 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Objects;
+
+public class MemTableInfo {
+  private final String columnFamilyName;
+  private final long firstSeqno;
+  private final long earliestSeqno;
+  private final long numEntries;
+  private final long numDeletes;
+
+  /**
+   * Access is package private as this will only be constructed from
+   * C++ via JNI and for testing.
+   */
+  MemTableInfo(final String columnFamilyName, final long firstSeqno, final long earliestSeqno,
+      final long numEntries, final long numDeletes) {
+    this.columnFamilyName = columnFamilyName;
+    this.firstSeqno = firstSeqno;
+    this.earliestSeqno = earliestSeqno;
+    this.numEntries = numEntries;
+    this.numDeletes = numDeletes;
+  }
+
+  /**
+   * Get the name of the column family to which the memtable belongs.
+   *
+   * @return the name of the column family.
+   */
+  public String getColumnFamilyName() {
+    return columnFamilyName;
+  }
+
+  /**
+   * Get the sequence number of the first element that was inserted into the
+   * memtable.
+   *
+   * @return the sequence number of the first inserted element.
+   */
+  public long getFirstSeqno() {
+    return firstSeqno;
+  }
+
+  /**
+   * Get the sequence number that is guaranteed to be smaller than or equal
+   * to the sequence number of any key that could be inserted into this
+   * memtable. It can then be assumed that any write with a larger (or equal)
+   * sequence number will be present in this memtable or a later memtable.
+   *
+   * @return the earliest sequence number.
+   */
+  public long getEarliestSeqno() {
+    return earliestSeqno;
+  }
+
+  /**
+   * Get the total number of entries in the memtable.
+   *
+   * @return the total number of entries.
+   */
+  public long getNumEntries() {
+    return numEntries;
+  }
+
+  /**
+   * Get the total number of deletes in the memtable.
+   *
+   * @return the total number of deletes.
+   */
+  public long getNumDeletes() {
+    return numDeletes;
+  }
+
+  @Override
+  public boolean equals(final Object o) {
+    if (this == o)
+      return true;
+    if (o == null || getClass() != o.getClass())
+      return false;
+    final MemTableInfo that = (MemTableInfo) o;
+    return firstSeqno == that.firstSeqno && earliestSeqno == that.earliestSeqno
+        && numEntries == that.numEntries && numDeletes == that.numDeletes
+        && Objects.equals(columnFamilyName, that.columnFamilyName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(columnFamilyName, firstSeqno, earliestSeqno, numEntries, numDeletes);
+  }
+
+  @Override
+  public String toString() {
+    return "MemTableInfo{"
+        + "columnFamilyName='" + columnFamilyName + '\'' + ", firstSeqno=" + firstSeqno
+        + ", earliestSeqno=" + earliestSeqno + ", numEntries=" + numEntries
+        + ", numDeletes=" + numDeletes + '}';
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java
new file mode 100644
index 000000000..6010ce7af
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUsageType.java
@@ -0,0 +1,72 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * MemoryUsageType
+ *
+ * <p>The value will be used as a key to indicate the type of memory usage
+ * described.</p>
+ */
+public enum MemoryUsageType {
+  /**
+   * Memory usage of all the mem-tables.
+   */
+  kMemTableTotal((byte) 0),
+  /**
+   * Memory usage of those un-flushed mem-tables.
+   */
+  kMemTableUnFlushed((byte) 1),
+  /**
+   * Memory usage of all the table readers.
+   */
+  kTableReadersTotal((byte) 2),
+  /**
+   * Memory usage by Cache.
+   */
+  kCacheTotal((byte) 3),
+  /**
+   * Max usage types - copied to keep 1:1 with native.
+   */
+  kNumUsageTypes((byte) 4);
+
+  /**
+   * Returns the byte value of the enumeration's value
+   *
+   * @return byte representation
+   */
+  public byte getValue() {
+    return value_;
+  }
+
+  /**
+   * <p>Get the MemoryUsageType enumeration value by
+   * passing the byte identifier to this method.</p>
+   *
+   * @param byteIdentifier of MemoryUsageType.
+   *
+   * @return MemoryUsageType instance.
+   *
+   * @throws IllegalArgumentException if the usage type for the byteIdentifier
+   *     cannot be found
+   */
+  public static MemoryUsageType getMemoryUsageType(final byte byteIdentifier) {
+    for (final MemoryUsageType memoryUsageType : MemoryUsageType.values()) {
+      if (memoryUsageType.getValue() == byteIdentifier) {
+        return memoryUsageType;
+      }
+    }
+
+    throw new IllegalArgumentException(
+        "Illegal value provided for MemoryUsageType.");
+  }
+
+  MemoryUsageType(final byte value) {
+    value_ = value;
+  }
+
+  private final byte value_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java
new file mode 100644
index 000000000..52b2175e6
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MemoryUtil.java
@@ -0,0 +1,60 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.*;
+
+/**
+ * JNI passthrough for MemoryUtil.
+ */
+public class MemoryUtil {
+
+  /**
+   * <p>Returns the approximate memory usage of different types in the input
+   * list of DBs and Cache set. For instance, in the output map the key
+   * kMemTableTotal will be associated with the memory
+   * usage of all the mem-tables from all the input rocksdb instances.</p>
+   *
+   * <p>Note that for memory usage inside Cache class, we will
+   * only report the usage of the input "cache_set" without
+   * including those Cache usage inside the input list "dbs"
+   * of DBs.</p>
+   *
+   * @param dbs List of dbs to collect memory usage for.
+   * @param caches Set of caches to collect memory usage for.
+   * @return Map from {@link MemoryUsageType} to memory usage as a {@link Long}.
+   */
+  public static Map<MemoryUsageType, Long> getApproximateMemoryUsageByType(
+      final List<RocksDB> dbs, final Set<Cache> caches) {
+    final int dbCount = (dbs == null) ? 0 : dbs.size();
+    final int cacheCount = (caches == null) ? 0 : caches.size();
+    final long[] dbHandles = new long[dbCount];
+    final long[] cacheHandles = new long[cacheCount];
+    if (dbCount > 0) {
+      final ListIterator<RocksDB> dbIter = dbs.listIterator();
+      while (dbIter.hasNext()) {
+        dbHandles[dbIter.nextIndex()] = dbIter.next().nativeHandle_;
+      }
+    }
+    if (cacheCount > 0) {
+      // NOTE: there is no clean way to track both the index and the iterator
+      // simultaneously within a Set, so a manual counter is used.
+      int i = 0;
+      for (final Cache cache : caches) {
+        cacheHandles[i] = cache.nativeHandle_;
+        i++;
+      }
+    }
+    final Map<Byte, Long> byteOutput = getApproximateMemoryUsageByType(dbHandles, cacheHandles);
+    final Map<MemoryUsageType, Long> output = new HashMap<>();
+    for (final Map.Entry<Byte, Long> longEntry : byteOutput.entrySet()) {
+      output.put(MemoryUsageType.getMemoryUsageType(longEntry.getKey()), longEntry.getValue());
+    }
+    return output;
+  }
+
+  private static native Map<Byte, Long> getApproximateMemoryUsageByType(final long[] dbHandles,
+      final long[] cacheHandles);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java
new file mode 100644
index 000000000..c299f6221
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java
@@ -0,0 +1,18 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * MergeOperator holds an operator to be applied when compacting
+ * two merge operands held under the same key in order to obtain a single
+ * value.
+ */
+public abstract class MergeOperator extends RocksObject {
+  protected MergeOperator(final long nativeHandle) {
+    super(nativeHandle);
+  }
+}
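A sketch of querying the breakdown exposed by the MemoryUtil passthrough above (assumes db and blockCache handles created earlier, and java.util.Collections imported):

    final Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(
        Collections.singletonList(db),
        Collections.singleton((Cache) blockCache));
    System.out.println("memtables: " + usage.get(MemoryUsageType.kMemTableTotal));
    System.out.println("cache:     " + usage.get(MemoryUsageType.kCacheTotal));
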
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
new file mode 100644
index 000000000..af28fa8ce
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptions.java
@@ -0,0 +1,623 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.*;
+
+public class MutableColumnFamilyOptions
+    extends AbstractMutableOptions {
+
+  /**
+   * Users must use the builder pattern or the parser.
+   *
+   * @param keys the keys
+   * @param values the values
+   *
+   * See {@link #builder()} and {@link #parse(String)}.
+   */
+  private MutableColumnFamilyOptions(final String[] keys,
+      final String[] values) {
+    super(keys, values);
+  }
+
+  /**
+   * Creates a builder which allows you
+   * to set MutableColumnFamilyOptions in a fluent
+   * manner
+   *
+   * @return A builder for MutableColumnFamilyOptions
+   */
+  public static MutableColumnFamilyOptionsBuilder builder() {
+    return new MutableColumnFamilyOptionsBuilder();
+  }
+
+  /**
+   * Parses a String representation of MutableColumnFamilyOptions
+   *
+   * The format is: key1=value1;key2=value2;key3=value3 etc
+   *
+   * For int[] values, each int should be separated by a colon, e.g.
+   *
+   * key1=value1;intArrayKey1=1:2:3
+   *
+   * @param str The string representation of the mutable column family options
+   * @param ignoreUnknown if true, keys that are not recognized are ignored
+   *     rather than causing an error
+   *
+   * @return A builder for the mutable column family options
+   */
+  public static MutableColumnFamilyOptionsBuilder parse(
+      final String str, final boolean ignoreUnknown) {
+    Objects.requireNonNull(str);
+
+    final List<OptionString.Entry> parsedOptions = OptionString.Parser.parse(str);
+    return new MutableColumnFamilyOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown);
+  }
+
+  public static MutableColumnFamilyOptionsBuilder parse(final String str) {
+    return parse(str, false);
+  }
+
+  private interface MutableColumnFamilyOptionKey extends MutableOptionKey {}
+
+  public enum MemtableOption implements MutableColumnFamilyOptionKey {
+    write_buffer_size(ValueType.LONG),
+    arena_block_size(ValueType.LONG),
+    memtable_prefix_bloom_size_ratio(ValueType.DOUBLE),
+    memtable_whole_key_filtering(ValueType.BOOLEAN),
+    @Deprecated memtable_prefix_bloom_bits(ValueType.INT),
+    @Deprecated memtable_prefix_bloom_probes(ValueType.INT),
+    memtable_huge_page_size(ValueType.LONG),
+    max_successive_merges(ValueType.LONG),
+    @Deprecated filter_deletes(ValueType.BOOLEAN),
+    max_write_buffer_number(ValueType.INT),
+    inplace_update_num_locks(ValueType.LONG),
+    experimental_mempurge_threshold(ValueType.DOUBLE);
+
+    private final ValueType valueType;
+    MemtableOption(final ValueType valueType) {
+      this.valueType = valueType;
+    }
+
+    @Override
+    public ValueType getValueType() {
+      return valueType;
+    }
+  }
+
+  public enum CompactionOption implements MutableColumnFamilyOptionKey {
+    disable_auto_compactions(ValueType.BOOLEAN),
+    soft_pending_compaction_bytes_limit(ValueType.LONG),
+    hard_pending_compaction_bytes_limit(ValueType.LONG),
+    level0_file_num_compaction_trigger(ValueType.INT),
+    level0_slowdown_writes_trigger(ValueType.INT),
+    level0_stop_writes_trigger(ValueType.INT),
+    max_compaction_bytes(ValueType.LONG),
+    target_file_size_base(ValueType.LONG),
+    target_file_size_multiplier(ValueType.INT),
+    max_bytes_for_level_base(ValueType.LONG),
+    max_bytes_for_level_multiplier(ValueType.INT),
+    max_bytes_for_level_multiplier_additional(ValueType.INT_ARRAY),
+    ttl(ValueType.LONG),
+    periodic_compaction_seconds(ValueType.LONG);
+
+    private final ValueType valueType;
+    CompactionOption(final ValueType valueType) {
+      this.valueType = valueType;
+    }
+
+    @Override
+    public ValueType getValueType() {
+      return valueType;
+    }
+  }
+
+  public enum BlobOption implements MutableColumnFamilyOptionKey {
+    enable_blob_files(ValueType.BOOLEAN),
+    min_blob_size(ValueType.LONG),
+    blob_file_size(ValueType.LONG),
+    blob_compression_type(ValueType.ENUM),
+    enable_blob_garbage_collection(ValueType.BOOLEAN),
+    blob_garbage_collection_age_cutoff(ValueType.DOUBLE),
+    blob_garbage_collection_force_threshold(ValueType.DOUBLE),
+    blob_compaction_readahead_size(ValueType.LONG),
+    blob_file_starting_level(ValueType.INT),
+    prepopulate_blob_cache(ValueType.ENUM);
+
+    private final ValueType valueType;
+    BlobOption(final ValueType valueType) {
+      this.valueType = valueType;
+    }
+
+    @Override
+    public ValueType getValueType() {
+      return valueType;
+    }
+  }
+
+  public enum MiscOption implements MutableColumnFamilyOptionKey {
+    max_sequential_skip_in_iterations(ValueType.LONG),
+    paranoid_file_checks(ValueType.BOOLEAN),
+    report_bg_io_stats(ValueType.BOOLEAN),
+    compression(ValueType.ENUM);
+
+    private final ValueType valueType;
+    MiscOption(final ValueType valueType) {
+      this.valueType = valueType;
+    }
+
+    @Override
+    public ValueType getValueType() {
+      return valueType;
+    }
+  }
+
+  public static class MutableColumnFamilyOptionsBuilder
+      extends AbstractMutableOptionsBuilder<MutableColumnFamilyOptions,
+          MutableColumnFamilyOptionsBuilder, MutableColumnFamilyOptionKey>
+      implements MutableColumnFamilyOptionsInterface<MutableColumnFamilyOptionsBuilder> {
+
+    private static final Map<String, MutableColumnFamilyOptionKey> ALL_KEYS_LOOKUP =
+        new HashMap<>();
+    static {
+      for (final MutableColumnFamilyOptionKey key : MemtableOption.values()) {
+        ALL_KEYS_LOOKUP.put(key.name(), key);
+      }
+
+      for (final MutableColumnFamilyOptionKey key : CompactionOption.values()) {
+        ALL_KEYS_LOOKUP.put(key.name(), key);
+      }
+
+      for (final MutableColumnFamilyOptionKey key : MiscOption.values()) {
+        ALL_KEYS_LOOKUP.put(key.name(), key);
+      }
+
+      for (final MutableColumnFamilyOptionKey key : BlobOption.values()) {
+        ALL_KEYS_LOOKUP.put(key.name(), key);
+      }
+    }
+
+    private MutableColumnFamilyOptionsBuilder() {
+      super();
+    }
+
+    @Override
+    protected MutableColumnFamilyOptionsBuilder self() {
+      return this;
+    }
+
+    @Override
+    protected Map<String, MutableColumnFamilyOptionKey> allKeys() {
+      return ALL_KEYS_LOOKUP;
+    }
+
+    @Override
+    protected MutableColumnFamilyOptions build(final String[] keys,
+        final String[] values) {
+      return new MutableColumnFamilyOptions(keys, values);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setWriteBufferSize(
+        final long writeBufferSize) {
+      return setLong(MemtableOption.write_buffer_size, writeBufferSize);
+    }
+
+    @Override
+    public long writeBufferSize() {
+      return getLong(MemtableOption.write_buffer_size);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setArenaBlockSize(
+        final long arenaBlockSize) {
+      return setLong(MemtableOption.arena_block_size, arenaBlockSize);
+    }
+
+    @Override
+    public long arenaBlockSize() {
+      return getLong(MemtableOption.arena_block_size);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMemtablePrefixBloomSizeRatio(
+        final double memtablePrefixBloomSizeRatio) {
+      return setDouble(MemtableOption.memtable_prefix_bloom_size_ratio,
+          memtablePrefixBloomSizeRatio);
+    }
+
+    @Override
+    public double memtablePrefixBloomSizeRatio() {
+      return getDouble(MemtableOption.memtable_prefix_bloom_size_ratio);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMemtableWholeKeyFiltering(
+        final boolean memtableWholeKeyFiltering) {
+      return setBoolean(MemtableOption.memtable_whole_key_filtering, memtableWholeKeyFiltering);
+    }
+
+    @Override
+    public boolean memtableWholeKeyFiltering() {
+      return getBoolean(MemtableOption.memtable_whole_key_filtering);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMemtableHugePageSize(
+        final long memtableHugePageSize) {
+      return setLong(MemtableOption.memtable_huge_page_size,
+          memtableHugePageSize);
+    }
+
+    @Override
+    public long memtableHugePageSize() {
+      return getLong(MemtableOption.memtable_huge_page_size);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMaxSuccessiveMerges(
+        final long maxSuccessiveMerges) {
+      return setLong(MemtableOption.max_successive_merges, maxSuccessiveMerges);
+    }
+
+    @Override
+    public long maxSuccessiveMerges() {
+      return getLong(MemtableOption.max_successive_merges);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMaxWriteBufferNumber(
+        final int maxWriteBufferNumber) {
+      return setInt(MemtableOption.max_write_buffer_number,
+          maxWriteBufferNumber);
+    }
+
+    @Override
+    public int maxWriteBufferNumber() {
+      return getInt(MemtableOption.max_write_buffer_number);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setInplaceUpdateNumLocks(
+        final long inplaceUpdateNumLocks) {
+      return setLong(MemtableOption.inplace_update_num_locks,
+          inplaceUpdateNumLocks);
+    }
+
+    @Override
+    public long inplaceUpdateNumLocks() {
+      return getLong(MemtableOption.inplace_update_num_locks);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setExperimentalMempurgeThreshold(
+        final double experimentalMempurgeThreshold) {
+      return setDouble(
+          MemtableOption.experimental_mempurge_threshold, experimentalMempurgeThreshold);
+    }
+
+    @Override
+    public double experimentalMempurgeThreshold() {
+      return getDouble(MemtableOption.experimental_mempurge_threshold);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setDisableAutoCompactions(
+        final boolean disableAutoCompactions) {
+      return setBoolean(CompactionOption.disable_auto_compactions,
+          disableAutoCompactions);
+    }
+
+    @Override
+    public boolean disableAutoCompactions() {
+      return getBoolean(CompactionOption.disable_auto_compactions);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setSoftPendingCompactionBytesLimit(
+        final long softPendingCompactionBytesLimit) {
+      return setLong(CompactionOption.soft_pending_compaction_bytes_limit,
+          softPendingCompactionBytesLimit);
+    }
+
+    @Override
+    public long softPendingCompactionBytesLimit() {
+      return getLong(CompactionOption.soft_pending_compaction_bytes_limit);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setHardPendingCompactionBytesLimit(
+        final long hardPendingCompactionBytesLimit) {
+      return setLong(CompactionOption.hard_pending_compaction_bytes_limit,
+          hardPendingCompactionBytesLimit);
+    }
+
+    @Override
+    public long hardPendingCompactionBytesLimit() {
+      return getLong(CompactionOption.hard_pending_compaction_bytes_limit);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setLevel0FileNumCompactionTrigger(
+        final int level0FileNumCompactionTrigger) {
+      return setInt(CompactionOption.level0_file_num_compaction_trigger,
+          level0FileNumCompactionTrigger);
+    }
+
+    @Override
+    public int level0FileNumCompactionTrigger() {
+      return getInt(CompactionOption.level0_file_num_compaction_trigger);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setLevel0SlowdownWritesTrigger(
+        final int level0SlowdownWritesTrigger) {
+      return setInt(CompactionOption.level0_slowdown_writes_trigger,
+          level0SlowdownWritesTrigger);
+    }
+
+    @Override
+    public int level0SlowdownWritesTrigger() {
+      return getInt(CompactionOption.level0_slowdown_writes_trigger);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setLevel0StopWritesTrigger(
+        final int level0StopWritesTrigger) {
+      return setInt(CompactionOption.level0_stop_writes_trigger,
+          level0StopWritesTrigger);
+    }
+
+    @Override
+    public int level0StopWritesTrigger() {
+      return getInt(CompactionOption.level0_stop_writes_trigger);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMaxCompactionBytes(final long
+        maxCompactionBytes) {
+      return setLong(CompactionOption.max_compaction_bytes, maxCompactionBytes);
+    }
+
+    @Override
+    public long maxCompactionBytes() {
+      return getLong(CompactionOption.max_compaction_bytes);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setTargetFileSizeBase(
+        final long targetFileSizeBase) {
+      return setLong(CompactionOption.target_file_size_base,
+          targetFileSizeBase);
+    }
+
+    @Override
+    public long targetFileSizeBase() {
+      return getLong(CompactionOption.target_file_size_base);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setTargetFileSizeMultiplier(
+        final int targetFileSizeMultiplier) {
+      return setInt(CompactionOption.target_file_size_multiplier,
+          targetFileSizeMultiplier);
+    }
+
+    @Override
+    public int targetFileSizeMultiplier() {
+      return getInt(CompactionOption.target_file_size_multiplier);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelBase(
+        final long maxBytesForLevelBase) {
+      return setLong(CompactionOption.max_bytes_for_level_base,
+          maxBytesForLevelBase);
+    }
+
+    @Override
+    public long maxBytesForLevelBase() {
+      return getLong(CompactionOption.max_bytes_for_level_base);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplier(
+        final double maxBytesForLevelMultiplier) {
+      return setDouble(CompactionOption.max_bytes_for_level_multiplier, maxBytesForLevelMultiplier);
+    }
+
+    @Override
+    public double maxBytesForLevelMultiplier() {
+      return getDouble(CompactionOption.max_bytes_for_level_multiplier);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMaxBytesForLevelMultiplierAdditional(
+        final int[] maxBytesForLevelMultiplierAdditional) {
+      return setIntArray(
+          CompactionOption.max_bytes_for_level_multiplier_additional,
+          maxBytesForLevelMultiplierAdditional);
+    }
+
+    @Override
+    public int[] maxBytesForLevelMultiplierAdditional() {
+      return getIntArray(
+          CompactionOption.max_bytes_for_level_multiplier_additional);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setMaxSequentialSkipInIterations(
+        final long maxSequentialSkipInIterations) {
+      return setLong(MiscOption.max_sequential_skip_in_iterations,
+          maxSequentialSkipInIterations);
+    }
+
+    @Override
+    public long maxSequentialSkipInIterations() {
+      return getLong(MiscOption.max_sequential_skip_in_iterations);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setParanoidFileChecks(
+        final boolean paranoidFileChecks) {
+      return setBoolean(MiscOption.paranoid_file_checks, paranoidFileChecks);
+    }
+
+    @Override
+    public boolean paranoidFileChecks() {
+      return getBoolean(MiscOption.paranoid_file_checks);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setCompressionType(
+        final CompressionType compressionType) {
+      return setEnum(MiscOption.compression, compressionType);
+    }
+
+    @Override
+    public CompressionType compressionType() {
+      return (CompressionType) getEnum(MiscOption.compression);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setReportBgIoStats(
+        final boolean reportBgIoStats) {
+      return setBoolean(MiscOption.report_bg_io_stats, reportBgIoStats);
+    }
+
+    @Override
+    public boolean reportBgIoStats() {
+      return getBoolean(MiscOption.report_bg_io_stats);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder setTtl(final long ttl) {
+      return setLong(CompactionOption.ttl, ttl);
+    }
+
+    @Override
+    public long ttl() {
+      return getLong(CompactionOption.ttl);
+    }
+
+    @Override
+    public MutableColumnFamilyOptionsBuilder
setPeriodicCompactionSeconds( + final long periodicCompactionSeconds) { + return setLong(CompactionOption.periodic_compaction_seconds, periodicCompactionSeconds); + } + + @Override + public long periodicCompactionSeconds() { + return getLong(CompactionOption.periodic_compaction_seconds); + } + + @Override + public MutableColumnFamilyOptionsBuilder setEnableBlobFiles(final boolean enableBlobFiles) { + return setBoolean(BlobOption.enable_blob_files, enableBlobFiles); + } + + @Override + public boolean enableBlobFiles() { + return getBoolean(BlobOption.enable_blob_files); + } + + @Override + public MutableColumnFamilyOptionsBuilder setMinBlobSize(final long minBlobSize) { + return setLong(BlobOption.min_blob_size, minBlobSize); + } + + @Override + public long minBlobSize() { + return getLong(BlobOption.min_blob_size); + } + + @Override + public MutableColumnFamilyOptionsBuilder setBlobFileSize(final long blobFileSize) { + return setLong(BlobOption.blob_file_size, blobFileSize); + } + + @Override + public long blobFileSize() { + return getLong(BlobOption.blob_file_size); + } + + @Override + public MutableColumnFamilyOptionsBuilder setBlobCompressionType( + final CompressionType compressionType) { + return setEnum(BlobOption.blob_compression_type, compressionType); + } + + @Override + public CompressionType blobCompressionType() { + return (CompressionType) getEnum(BlobOption.blob_compression_type); + } + + @Override + public MutableColumnFamilyOptionsBuilder setEnableBlobGarbageCollection( + final boolean enableBlobGarbageCollection) { + return setBoolean(BlobOption.enable_blob_garbage_collection, enableBlobGarbageCollection); + } + + @Override + public boolean enableBlobGarbageCollection() { + return getBoolean(BlobOption.enable_blob_garbage_collection); + } + + @Override + public MutableColumnFamilyOptionsBuilder setBlobGarbageCollectionAgeCutoff( + final double blobGarbageCollectionAgeCutoff) { + return setDouble( + BlobOption.blob_garbage_collection_age_cutoff, blobGarbageCollectionAgeCutoff); + } + + @Override + public double blobGarbageCollectionAgeCutoff() { + return getDouble(BlobOption.blob_garbage_collection_age_cutoff); + } + + @Override + public MutableColumnFamilyOptionsBuilder setBlobGarbageCollectionForceThreshold( + final double blobGarbageCollectionForceThreshold) { + return setDouble( + BlobOption.blob_garbage_collection_force_threshold, blobGarbageCollectionForceThreshold); + } + + @Override + public double blobGarbageCollectionForceThreshold() { + return getDouble(BlobOption.blob_garbage_collection_force_threshold); + } + + @Override + public MutableColumnFamilyOptionsBuilder setBlobCompactionReadaheadSize( + final long blobCompactionReadaheadSize) { + return setLong(BlobOption.blob_compaction_readahead_size, blobCompactionReadaheadSize); + } + + @Override + public long blobCompactionReadaheadSize() { + return getLong(BlobOption.blob_compaction_readahead_size); + } + + @Override + public MutableColumnFamilyOptionsBuilder setBlobFileStartingLevel( + final int blobFileStartingLevel) { + return setInt(BlobOption.blob_file_starting_level, blobFileStartingLevel); + } + + @Override + public int blobFileStartingLevel() { + return getInt(BlobOption.blob_file_starting_level); + } + + @Override + public MutableColumnFamilyOptionsBuilder setPrepopulateBlobCache( + final PrepopulateBlobCache prepopulateBlobCache) { + return setEnum(BlobOption.prepopulate_blob_cache, prepopulateBlobCache); + } + + @Override + public PrepopulateBlobCache prepopulateBlobCache() { + return 
(PrepopulateBlobCache) getEnum(BlobOption.prepopulate_blob_cache); + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java new file mode 100644 index 000000000..0f5fe7d78 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableColumnFamilyOptionsInterface.java @@ -0,0 +1,156 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public interface MutableColumnFamilyOptionsInterface< + T extends MutableColumnFamilyOptionsInterface<T>> + extends AdvancedMutableColumnFamilyOptionsInterface<T> { + /** + * Amount of data to build up in memory (backed by an unsorted log + * on disk) before converting to a sorted on-disk file. + * + * Larger values increase performance, especially during bulk loads. + * Up to {@code max_write_buffer_number} write buffers may be held in memory + * at the same time, so you may wish to adjust this parameter + * to control memory usage. + * + * Also, a larger write buffer will result in a longer recovery time + * the next time the database is opened. + * + * Default: 64MB + * @param writeBufferSize the size of write buffer. + * @return the instance of the current object. + * @throws java.lang.IllegalArgumentException thrown on 32-bit platforms + * if the value overflows the underlying platform-specific limit. + */ + T setWriteBufferSize(long writeBufferSize);
+ + /** + * Returns the size of the write buffer. + * + * @return size of write buffer. + * @see #setWriteBufferSize(long) + */ + long writeBufferSize();
+ + /** + * Disable automatic compactions. Manual compactions can still + * be issued on this column family + * + * @param disableAutoCompactions true if auto-compactions are disabled. + * @return the reference to the current option. + */ + T setDisableAutoCompactions(boolean disableAutoCompactions);
+ + /** + * Disable automatic compactions. Manual compactions can still + * be issued on this column family + * + * @return true if auto-compactions are disabled. + */ + boolean disableAutoCompactions();
+ + /** + * Number of files to trigger level-0 compaction. A value < 0 means that + * level-0 compaction will not be triggered by number of files at all. + * + * Default: 4 + * + * @param level0FileNumCompactionTrigger The number of files to trigger + * level-0 compaction + * @return the reference to the current option. + */ + T setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger);
+ + /** + * Number of files to trigger level-0 compaction. A value < 0 means that + * level-0 compaction will not be triggered by number of files at all. + * + * Default: 4 + * + * @return The number of files to trigger level-0 compaction. + */ + int level0FileNumCompactionTrigger();
+ + /** + * We try to limit number of bytes in one compaction to be lower than this + * threshold. But it's not guaranteed. + * Value 0 will be sanitized. + * + * @param maxCompactionBytes max bytes in a compaction + * @return the reference to the current option. + * @see #maxCompactionBytes() + */ + T setMaxCompactionBytes(final long maxCompactionBytes);
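Every option declared on this interface is dynamically changeable, so a column family can be re-tuned while the database stays open. A minimal sketch of the round trip through RocksDB#setOptions, assuming an already-open db and column family handle (the method name here is illustrative, not part of this patch):

    static void tuneCompaction(final RocksDB db, final ColumnFamilyHandle cf)
        throws RocksDBException {
      final MutableColumnFamilyOptions opts = MutableColumnFamilyOptions.builder()
          .setLevel0FileNumCompactionTrigger(8)       // compact once 8 L0 files accumulate
          .setMaxCompactionBytes(512L * 1024 * 1024)  // cap a single compaction at 512 MB
          .build();
      db.setOptions(cf, opts);                        // applied without reopening the DB
    }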
+ + /** + * We try to limit number of bytes in one compaction to be lower than this + * threshold. But it's not guaranteed. + * Value 0 will be sanitized. + * + * @return the maximum number of bytes for a compaction. + * @see #setMaxCompactionBytes(long) + */ + long maxCompactionBytes();
+ + /** + * The upper-bound of the total size of level-1 files in bytes. + * Maximum number of bytes for level L can be calculated as + * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1)) + * For example, if maxBytesForLevelBase is 200MB, and if + * max_bytes_for_level_multiplier is 10, total data size for level-1 + * will be 200MB, total file size for level-2 will be 2GB, + * and total file size for level-3 will be 20GB. + * By default 'maxBytesForLevelBase' is 256MB. + * + * @param maxBytesForLevelBase maximum bytes for level base. + * + * @return the reference to the current option. + * + * See {@link AdvancedMutableColumnFamilyOptionsInterface#setMaxBytesForLevelMultiplier(double)} + */ + T setMaxBytesForLevelBase( + long maxBytesForLevelBase);
+ + /** + * The upper-bound of the total size of level-1 files in bytes. + * Maximum number of bytes for level L can be calculated as + * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1)) + * For example, if maxBytesForLevelBase is 200MB, and if + * max_bytes_for_level_multiplier is 10, total data size for level-1 + * will be 200MB, total file size for level-2 will be 2GB, + * and total file size for level-3 will be 20GB. + * By default 'maxBytesForLevelBase' is 256MB. + * + * @return the upper-bound of the total size of level-1 files + * in bytes. + * + * See {@link AdvancedMutableColumnFamilyOptionsInterface#maxBytesForLevelMultiplier()} + */ + long maxBytesForLevelBase();
+ + /** + * Compress blocks using the specified compression algorithm. This + * parameter can be changed dynamically. + * + * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. + * + * @param compressionType Compression Type. + * @return the reference to the current option. + */ + T setCompressionType( + CompressionType compressionType);
+ + /** + * Compress blocks using the specified compression algorithm. This + * parameter can be changed dynamically. + * + * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. + * + * @return Compression type. + */ + CompressionType compressionType(); +}
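Because compression is one of the mutable options, the codec can be swapped on a live column family; only SST files written afterwards use the new algorithm. A hedged sketch (method name illustrative):

    static void switchToLz4(final RocksDB db, final ColumnFamilyHandle cf)
        throws RocksDBException {
      db.setOptions(cf,
          MutableColumnFamilyOptions.builder()
              .setCompressionType(CompressionType.LZ4_COMPRESSION)
              .build()); // existing files keep their old codec until recompacted
    }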
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java new file mode 100644 index 000000000..bfba1dab3 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptions.java @@ -0,0 +1,294 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class MutableDBOptions extends AbstractMutableOptions {
+ + /** + * User must use the builder pattern, or the parser. + * + * @param keys the keys + * @param values the values + * + * See {@link #builder()} and {@link #parse(String)}. + */ + private MutableDBOptions(final String[] keys, final String[] values) { + super(keys, values); + }
+ + /** + * Creates a builder which allows you + * to set MutableDBOptions in a fluent + * manner + * + * @return A builder for MutableDBOptions + */ + public static MutableDBOptionsBuilder builder() { + return new MutableDBOptionsBuilder(); + }
+ + /** + * Parses a String representation of MutableDBOptions + * + * The format is: key1=value1;key2=value2;key3=value3 etc + * + * For int[] values, each int should be separated by a colon, e.g. + * + * key1=value1;intArrayKey1=1:2:3 + * + * @param str The string representation of the mutable db options + * @param ignoreUnknown if true, unknown keys in the string are ignored; + * otherwise they are treated as an error + * + * @return A builder for the mutable db options + */ + public static MutableDBOptionsBuilder parse(final String str, final boolean ignoreUnknown) { + Objects.requireNonNull(str); + + final List<OptionString.Entry> parsedOptions = OptionString.Parser.parse(str); + return new MutableDBOptions.MutableDBOptionsBuilder().fromParsed(parsedOptions, ignoreUnknown); + }
+ + public static MutableDBOptionsBuilder parse(final String str) { + return parse(str, false); + }
+ + private interface MutableDBOptionKey extends MutableOptionKey {}
+ + public enum DBOption implements MutableDBOptionKey { + max_background_jobs(ValueType.INT), + max_background_compactions(ValueType.INT), + avoid_flush_during_shutdown(ValueType.BOOLEAN), + writable_file_max_buffer_size(ValueType.LONG), + delayed_write_rate(ValueType.LONG), + max_total_wal_size(ValueType.LONG), + delete_obsolete_files_period_micros(ValueType.LONG), + stats_dump_period_sec(ValueType.INT), + stats_persist_period_sec(ValueType.INT), + stats_history_buffer_size(ValueType.LONG), + max_open_files(ValueType.INT), + bytes_per_sync(ValueType.LONG), + wal_bytes_per_sync(ValueType.LONG), + strict_bytes_per_sync(ValueType.BOOLEAN), + compaction_readahead_size(ValueType.LONG); + + private final ValueType valueType; + DBOption(final ValueType valueType) { + this.valueType = valueType; + } + + @Override + public ValueType getValueType() { + return valueType; + } + }
+ + public static class MutableDBOptionsBuilder + extends AbstractMutableOptionsBuilder<MutableDBOptions, MutableDBOptionsBuilder, MutableDBOptionKey> + implements MutableDBOptionsInterface<MutableDBOptionsBuilder> { + + private static final Map<String, MutableDBOptionKey> ALL_KEYS_LOOKUP = new HashMap<>(); + static { + for(final MutableDBOptionKey key : DBOption.values()) { + ALL_KEYS_LOOKUP.put(key.name(), key); + } + } + + private MutableDBOptionsBuilder() { + super(); + } + + @Override + protected MutableDBOptionsBuilder self() { + return this; + } + + @Override + protected Map<String, MutableDBOptionKey> allKeys() { + return ALL_KEYS_LOOKUP; + } + + @Override + protected MutableDBOptions build(final String[] keys, + final String[] values) { + return new MutableDBOptions(keys, values); + }
+ + @Override + public MutableDBOptionsBuilder setMaxBackgroundJobs( + final int maxBackgroundJobs) { + return setInt(DBOption.max_background_jobs, maxBackgroundJobs); + } + + @Override + public int maxBackgroundJobs() { + return getInt(DBOption.max_background_jobs); + } + + @Override + @Deprecated + public MutableDBOptionsBuilder setMaxBackgroundCompactions( + final int maxBackgroundCompactions) { + return setInt(DBOption.max_background_compactions, + maxBackgroundCompactions); + } + + @Override + @Deprecated + public int maxBackgroundCompactions() { + return getInt(DBOption.max_background_compactions); + } + + @Override + public MutableDBOptionsBuilder setAvoidFlushDuringShutdown( + final boolean avoidFlushDuringShutdown) { + return
setBoolean(DBOption.avoid_flush_during_shutdown, + avoidFlushDuringShutdown); + } + + @Override + public boolean avoidFlushDuringShutdown() { + return getBoolean(DBOption.avoid_flush_during_shutdown); + } + + @Override + public MutableDBOptionsBuilder setWritableFileMaxBufferSize( + final long writableFileMaxBufferSize) { + return setLong(DBOption.writable_file_max_buffer_size, + writableFileMaxBufferSize); + } + + @Override + public long writableFileMaxBufferSize() { + return getLong(DBOption.writable_file_max_buffer_size); + } + + @Override + public MutableDBOptionsBuilder setDelayedWriteRate( + final long delayedWriteRate) { + return setLong(DBOption.delayed_write_rate, + delayedWriteRate); + } + + @Override + public long delayedWriteRate() { + return getLong(DBOption.delayed_write_rate); + } + + @Override + public MutableDBOptionsBuilder setMaxTotalWalSize( + final long maxTotalWalSize) { + return setLong(DBOption.max_total_wal_size, maxTotalWalSize); + } + + @Override + public long maxTotalWalSize() { + return getLong(DBOption.max_total_wal_size); + } + + @Override + public MutableDBOptionsBuilder setDeleteObsoleteFilesPeriodMicros( + final long micros) { + return setLong(DBOption.delete_obsolete_files_period_micros, micros); + } + + @Override + public long deleteObsoleteFilesPeriodMicros() { + return getLong(DBOption.delete_obsolete_files_period_micros); + } + + @Override + public MutableDBOptionsBuilder setStatsDumpPeriodSec( + final int statsDumpPeriodSec) { + return setInt(DBOption.stats_dump_period_sec, statsDumpPeriodSec); + } + + @Override + public int statsDumpPeriodSec() { + return getInt(DBOption.stats_dump_period_sec); + } + + @Override + public MutableDBOptionsBuilder setStatsPersistPeriodSec( + final int statsPersistPeriodSec) { + return setInt(DBOption.stats_persist_period_sec, statsPersistPeriodSec); + } + + @Override + public int statsPersistPeriodSec() { + return getInt(DBOption.stats_persist_period_sec); + } + + @Override + public MutableDBOptionsBuilder setStatsHistoryBufferSize( + final long statsHistoryBufferSize) { + return setLong(DBOption.stats_history_buffer_size, statsHistoryBufferSize); + } + + @Override + public long statsHistoryBufferSize() { + return getLong(DBOption.stats_history_buffer_size); + } + + @Override + public MutableDBOptionsBuilder setMaxOpenFiles(final int maxOpenFiles) { + return setInt(DBOption.max_open_files, maxOpenFiles); + } + + @Override + public int maxOpenFiles() { + return getInt(DBOption.max_open_files); + } + + @Override + public MutableDBOptionsBuilder setBytesPerSync(final long bytesPerSync) { + return setLong(DBOption.bytes_per_sync, bytesPerSync); + } + + @Override + public long bytesPerSync() { + return getLong(DBOption.bytes_per_sync); + } + + @Override + public MutableDBOptionsBuilder setWalBytesPerSync( + final long walBytesPerSync) { + return setLong(DBOption.wal_bytes_per_sync, walBytesPerSync); + } + + @Override + public long walBytesPerSync() { + return getLong(DBOption.wal_bytes_per_sync); + } + + @Override + public MutableDBOptionsBuilder setStrictBytesPerSync( + final boolean strictBytesPerSync) { + return setBoolean(DBOption.strict_bytes_per_sync, strictBytesPerSync); + } + + @Override + public boolean strictBytesPerSync() { + return getBoolean(DBOption.strict_bytes_per_sync); + } + + @Override + public MutableDBOptionsBuilder setCompactionReadaheadSize( + final long compactionReadaheadSize) { + return setLong(DBOption.compaction_readahead_size, + compactionReadaheadSize); + } + + @Override + public long 
compactionReadaheadSize() { + return getLong(DBOption.compaction_readahead_size); + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java new file mode 100644 index 000000000..bdf9d7bf6 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableDBOptionsInterface.java @@ -0,0 +1,440 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T>> {
+ /** + * Specifies the maximum number of concurrent background jobs (both flushes + * and compactions combined). + * Default: 2 + * + * @param maxBackgroundJobs number of max concurrent background jobs + * @return the instance of the current object. + */ + T setMaxBackgroundJobs(int maxBackgroundJobs);
+ + /** + * Returns the maximum number of concurrent background jobs (both flushes + * and compactions combined). + * Default: 2 + * + * @return the maximum number of concurrent background jobs. + */ + int maxBackgroundJobs();
+ + /** + * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the + * value of max_background_jobs. For backwards compatibility we will set + * `max_background_jobs = max_background_compactions + max_background_flushes` + * in the case where user sets at least one of `max_background_compactions` or + * `max_background_flushes` (we replace -1 by 1 in case one option is unset). + * + * Specifies the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * If you're increasing this, also consider increasing the number of threads in the + * LOW priority thread pool. For more information, see + * {@link RocksEnv#setBackgroundThreads(int, Priority)}. + * Default: -1 + * + * @param maxBackgroundCompactions the maximum number of background + * compaction jobs. + * @return the instance of the current object. + * + * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, Priority) + * @see DBOptionsInterface#maxBackgroundFlushes() + * @deprecated Use {@link #setMaxBackgroundJobs(int)} + */ + @Deprecated + T setMaxBackgroundCompactions(int maxBackgroundCompactions);
+ + /** + * NOT SUPPORTED ANYMORE: RocksDB automatically decides this based on the + * value of max_background_jobs. For backwards compatibility we will set + * `max_background_jobs = max_background_compactions + max_background_flushes` + * in the case where user sets at least one of `max_background_compactions` or + * `max_background_flushes` (we replace -1 by 1 in case one option is unset). + * + * Returns the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * When increasing this number, we may also want to consider increasing the + * number of threads in the LOW priority thread pool. + * Default: -1 + * + * @return the maximum number of concurrent background compaction jobs. + * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, Priority) + * + * @deprecated Use {@link #setMaxBackgroundJobs(int)} + */ + @Deprecated + int maxBackgroundCompactions();
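These DB-wide knobs are applied through RocksDB#setDBOptions. Since max_background_compactions is deprecated in favour of the combined job limit, a minimal sketch using the replacement (method name illustrative):

    static void scaleBackgroundWork(final RocksDB db) throws RocksDBException {
      // One combined limit replaces the old compaction/flush split.
      db.setDBOptions(MutableDBOptions.builder()
          .setMaxBackgroundJobs(8)
          .build());
    }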
+ + /** + * By default RocksDB will flush all memtables on DB close if there is + * unpersisted data (i.e. with the WAL disabled). The flush can be skipped to + * speed up DB close. Unpersisted data WILL BE LOST. + * + * DEFAULT: false + * + * Dynamically changeable through the + * {@link RocksDB#setDBOptions(MutableDBOptions)} + * API. + * + * @param avoidFlushDuringShutdown true if we should avoid flush during + * shutdown + * + * @return the reference to the current options. + */ + T setAvoidFlushDuringShutdown(boolean avoidFlushDuringShutdown);
+ + /** + * By default RocksDB will flush all memtables on DB close if there is + * unpersisted data (i.e. with the WAL disabled). The flush can be skipped to + * speed up DB close. Unpersisted data WILL BE LOST. + * + * DEFAULT: false + * + * Dynamically changeable through the + * {@link RocksDB#setDBOptions(MutableDBOptions)} + * API. + * + * @return true if we should avoid flush during shutdown + */ + boolean avoidFlushDuringShutdown();
+ + /** + * This is the maximum buffer size that is used by WritableFileWriter. + * On Windows, we need to maintain an aligned buffer for writes. + * We allow the buffer to grow until its size hits the limit. + * + * Default: 1024 * 1024 (1 MB) + * + * @param writableFileMaxBufferSize the maximum buffer size + * + * @return the reference to the current options. + */ + T setWritableFileMaxBufferSize(long writableFileMaxBufferSize);
+ + /** + * This is the maximum buffer size that is used by WritableFileWriter. + * On Windows, we need to maintain an aligned buffer for writes. + * We allow the buffer to grow until its size hits the limit. + * + * Default: 1024 * 1024 (1 MB) + * + * @return the maximum buffer size + */ + long writableFileMaxBufferSize();
+ + /** + * The limited write rate to DB if + * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or + * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, + * or we are writing to the last mem table allowed and we allow more than 3 + * mem tables. It is calculated using size of user write requests before + * compression. RocksDB may decide to slow down more if the compaction still + * gets behind further. + * If the value is 0, we will infer a value from the `rate_limiter` value + * if it is not empty, or 16MB if `rate_limiter` is empty. Note that + * if users change the rate in `rate_limiter` after DB is opened, + * `delayed_write_rate` won't be adjusted. + * + * Unit: bytes per second. + * + * Default: 0 + * + * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}. + * + * @param delayedWriteRate the rate in bytes per second + * + * @return the reference to the current options. + */ + T setDelayedWriteRate(long delayedWriteRate);
+ + /** + * The limited write rate to DB if + * {@link ColumnFamilyOptions#softPendingCompactionBytesLimit()} or + * {@link ColumnFamilyOptions#level0SlowdownWritesTrigger()} is triggered, + * or we are writing to the last mem table allowed and we allow more than 3 + * mem tables. It is calculated using size of user write requests before + * compression. RocksDB may decide to slow down more if the compaction still + * gets behind further. + * If the value is 0, we will infer a value from the `rate_limiter` value + * if it is not empty, or 16MB if `rate_limiter` is empty. Note that + * if users change the rate in `rate_limiter` after DB is opened, + * `delayed_write_rate` won't be adjusted. + * + * Unit: bytes per second. + * + * Default: 0 + * + * Dynamically changeable through {@link RocksDB#setDBOptions(MutableDBOptions)}. + * + * @return the rate in bytes per second + */ + long delayedWriteRate();
+ + /** + * <p>Set the max total write-ahead log size. Once write-ahead logs exceed this size, we will + * start forcing the flush of column families whose memtables are backed by the oldest live WAL + * file.</p> + * <p>The oldest WAL files are the ones that are causing all the space amplification.</p> + * <p>For example, with 15 column families, each with + * write_buffer_size = 128 MB + * max_write_buffer_number = 6 + * max_total_wal_size will be calculated to be [15 * 128MB * 6] * 4 = 45GB.</p> + * <p>The RocksDB wiki has some discussion about how the WAL interacts + * with memtables and flushing of column families, at + * ...</p> + * <p>If set to 0 (default), we will dynamically choose the WAL size limit to + * be [sum of all write_buffer_size * max_write_buffer_number] * 4.</p> + * <p>This option takes effect only when there is more than one column family, as + * otherwise the WAL size is dictated by the write_buffer_size.</p> + * <p>Default: 0</p> + * + * @param maxTotalWalSize max total wal size. + * @return the instance of the current object. + */ + T setMaxTotalWalSize(long maxTotalWalSize);
+ + /** + * <p>Returns the max total write-ahead log size. Once write-ahead logs exceed this size, + * we will start forcing the flush of column families whose memtables are + * backed by the oldest live WAL file.</p> + * <p>The oldest WAL files are the ones that are causing all the space amplification.</p> + * <p>For example, with 15 column families, each with + * write_buffer_size = 128 MB + * max_write_buffer_number = 6 + * max_total_wal_size will be calculated to be [15 * 128MB * 6] * 4 = 45GB.</p> + * <p>The RocksDB wiki has some discussion about how the WAL interacts + * with memtables and flushing of column families, at + * ...</p> + * <p>If set to 0 (default), we will dynamically choose the WAL size limit to + * be [sum of all write_buffer_size * max_write_buffer_number] * 4.</p> + * <p>This option takes effect only when there is more than one column family, as + * otherwise the WAL size is dictated by the write_buffer_size.</p> + * <p>Default: 0</p> + * + * @return max total wal size + */ + long maxTotalWalSize();
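The 45GB in the example above is just the formula evaluated once; a quick arithmetic check (illustrative, not part of this patch):

    public class WalSizeExample {
      public static void main(final String[] args) {
        // 15 column families, write_buffer_size = 128 MB, max_write_buffer_number = 6
        final long writeBufferSize = 128L * 1024 * 1024;
        final long dynamicLimit = 15 * writeBufferSize * 6 * 4;
        System.out.println(dynamicLimit == 45L * 1024 * 1024 * 1024); // true: 45 GB
      }
    }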
+ + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by the compaction + * process will still get automatically deleted on every compaction, + * regardless of this setting. + * + * @param micros the time interval in micros + * @return the instance of the current object. + */ + T setDeleteObsoleteFilesPeriodMicros(long micros);
+ + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by the compaction + * process will still get automatically deleted on every compaction, + * regardless of this setting. + * + * @return the time interval in micros when obsolete files will be deleted. + */ + long deleteObsoleteFilesPeriodMicros();
+ + /** + * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 600 (10 minutes) + * + * @param statsDumpPeriodSec time interval in seconds. + * @return the instance of the current object. + */ + T setStatsDumpPeriodSec(int statsDumpPeriodSec);
+ + /** + * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 600 (10 minutes) + * + * @return time interval in seconds. + */ + int statsDumpPeriodSec();
+ + /** + * If not zero, dump rocksdb.stats to RocksDB every + * {@code statsPersistPeriodSec} seconds + * + * Default: 600 + * + * @param statsPersistPeriodSec time interval in seconds. + * @return the instance of the current object. + */ + T setStatsPersistPeriodSec(int statsPersistPeriodSec);
+ + /** + * If not zero, dump rocksdb.stats to RocksDB every + * {@code statsPersistPeriodSec} seconds + * + * @return time interval in seconds. + */ + int statsPersistPeriodSec();
+ + /** + * If not zero, periodically take stats snapshots and store in memory, the + * memory size for stats snapshots is capped at {@code statsHistoryBufferSize} + * + * Default: 1MB + * + * @param statsHistoryBufferSize the size of the buffer. + * @return the instance of the current object. + */ + T setStatsHistoryBufferSize(long statsHistoryBufferSize);
+ + /** + * If not zero, periodically take stats snapshots and store in memory, the + * memory size for stats snapshots is capped at {@code statsHistoryBufferSize} + * + * @return the size of the buffer. + */ + long statsHistoryBufferSize();
+ + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on {@code target_file_size_base} and {@code target_file_size_multiplier} + * for level-based compaction. For universal-style compaction, you can usually + * set it to -1. + * Default: -1 + * + * @param maxOpenFiles the maximum number of open files. + * @return the instance of the current object. + */ + T setMaxOpenFiles(int maxOpenFiles);
+ + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on {@code target_file_size_base} and {@code target_file_size_multiplier} + * for level-based compaction. For universal-style compaction, you can usually + * set it to -1. + * Default: -1 + * + * @return the maximum number of open files. + */ + int maxOpenFiles();
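MutableDBOptions.parse accepts the same key names as the DBOption enum, in the key1=value1;key2=value2 format documented earlier. A small sketch:

    // Builds the same options a fluent builder would produce.
    final MutableDBOptions.MutableDBOptionsBuilder fromString =
        MutableDBOptions.parse("max_open_files=5000;stats_dump_period_sec=300");
    // db.setDBOptions(fromString.build()); // apply to a live DB as usual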
+ + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. + * Default: 0 + * + * @param bytesPerSync size in bytes + * @return the instance of the current object. + */ + T setBytesPerSync(long bytesPerSync);
+ + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. + * Default: 0 + * + * @return size in bytes + */ + long bytesPerSync();
+ + /** + * Same as {@link #setBytesPerSync(long)}, but applies to WAL files + * + * Default: 0, turned off + * + * @param walBytesPerSync size in bytes + * @return the instance of the current object. + */ + T setWalBytesPerSync(long walBytesPerSync);
+ + /** + * Same as {@link #bytesPerSync()}, but applies to WAL files + * + * Default: 0, turned off + * + * @return size in bytes + */ + long walBytesPerSync();
+ + /** + * When true, guarantees WAL files have at most {@link #walBytesPerSync()} + * bytes submitted for writeback at any given time, and SST files have at most + * {@link #bytesPerSync()} bytes pending writeback at any given time. This + * can be used to handle cases where processing speed exceeds I/O speed + * during file generation, which can lead to a huge sync when the file is + * finished, even with {@link #bytesPerSync()} / {@link #walBytesPerSync()} + * properly configured. + * + * - If `sync_file_range` is supported it achieves this by waiting for any + * prior `sync_file_range`s to finish before proceeding. In this way, + * processing (compression, etc.) can proceed uninhibited in the gap + * between `sync_file_range`s, and we block only when I/O falls + * behind. + * - Otherwise the `WritableFile::Sync` method is used. Note this mechanism + * always blocks, thus preventing the interleaving of I/O and processing. + * + * Note: Enabling this option does not provide any additional persistence + * guarantees, as it may use `sync_file_range`, which does not write out + * metadata. + * + * Default: false + * + * @param strictBytesPerSync the bytes per sync + * @return the instance of the current object. + */ + T setStrictBytesPerSync(boolean strictBytesPerSync);
+ + /** + * Return whether a strict limit is enforced on the bytes pending writeback. + * + * See {@link #setStrictBytesPerSync(boolean)} + * + * @return true if the strict per-sync byte limits are enforced. + */ + boolean strictBytesPerSync();
+ + /** + * If non-zero, we perform bigger reads when doing compaction. If you're + * running RocksDB on spinning disks, you should set this to at least 2MB. + * + * That way RocksDB's compaction does sequential instead of random reads. + * + * Default: 0 + * + * @param compactionReadaheadSize The compaction read-ahead size + * + * @return the reference to the current options. + */ + T setCompactionReadaheadSize(final long compactionReadaheadSize);
+ + /** + * If non-zero, we perform bigger reads when doing compaction. If you're + * running RocksDB on spinning disks, you should set this to at least 2MB. + * + * That way RocksDB's compaction does sequential instead of random reads. + * + * Default: 0 + * + * @return The compaction read-ahead size + */ + long compactionReadaheadSize(); +}
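DBOptions implements this interface as well, so the spinning-disk guidance above can also be encoded at open time rather than applied dynamically. A hedged sketch:

    try (final DBOptions dbOptions = new DBOptions()
             .setCompactionReadaheadSize(2L * 1024 * 1024) // >= 2MB for spinning disks
             .setBytesPerSync(1L * 1024 * 1024)) {         // smooth out background I/O
      // pass dbOptions to RocksDB.open(dbOptions, path, descriptors, handles)
    }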
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java new file mode 100644 index 000000000..ec1b9ff3b --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionKey.java @@ -0,0 +1,16 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +public interface MutableOptionKey { + enum ValueType { + DOUBLE, + LONG, + INT, + BOOLEAN, + INT_ARRAY, + ENUM + } + + String name(); + ValueType getValueType(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java new file mode 100644 index 000000000..7f69eeb9e --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/MutableOptionValue.java @@ -0,0 +1,369 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +import static org.rocksdb.AbstractMutableOptions.INT_ARRAY_INT_SEPARATOR; + +public abstract class MutableOptionValue<T> {
+ + abstract double asDouble() throws NumberFormatException; + abstract long asLong() throws NumberFormatException; + abstract int asInt() throws NumberFormatException; + abstract boolean asBoolean() throws IllegalStateException; + abstract int[] asIntArray() throws IllegalStateException; + abstract String asString(); + abstract T asObject();
+ + private static abstract class MutableOptionValueObject<T> + extends MutableOptionValue<T> { + protected final T value; + + protected MutableOptionValueObject(final T value) { + this.value = value; + } + + @Override T asObject() { + return value; + } + }
+ + static MutableOptionValue<String> fromString(final String s) { + return new MutableOptionStringValue(s); + } + + static MutableOptionValue<Double> fromDouble(final double d) { + return new MutableOptionDoubleValue(d); + } + + static MutableOptionValue<Long> fromLong(final long d) { + return new MutableOptionLongValue(d); + } + + static MutableOptionValue<Integer> fromInt(final int i) { + return new MutableOptionIntValue(i); + } + + static MutableOptionValue<Boolean> fromBoolean(final boolean b) { + return new MutableOptionBooleanValue(b); + } + + static MutableOptionValue<int[]> fromIntArray(final int[] ix) { + return new MutableOptionIntArrayValue(ix); + } + + static <N extends Enum<N>> MutableOptionValue<N> fromEnum(final N value) { + return new MutableOptionEnumValue<>(value); + }
+ + static class MutableOptionStringValue + extends MutableOptionValueObject<String> { + MutableOptionStringValue(final String value) { + super(value); + } + + @Override + double asDouble() throws NumberFormatException { + return Double.parseDouble(value); + } + + @Override + long asLong() throws NumberFormatException { + return Long.parseLong(value); + } + + @Override + int asInt() throws NumberFormatException { + return Integer.parseInt(value); + } + + @Override + boolean asBoolean() throws IllegalStateException { + return Boolean.parseBoolean(value); + } + + @Override + int[] asIntArray() throws IllegalStateException { + throw new IllegalStateException("String is not applicable as int[]"); + } + + @Override + String asString() { + return value; + } + }
+ + static class MutableOptionDoubleValue + extends MutableOptionValue<Double> { + private final double value; + MutableOptionDoubleValue(final double value) { + this.value = value; + } + + @Override + double asDouble() { + return value; + } + + @Override + long asLong() throws NumberFormatException { + return Double.valueOf(value).longValue(); + } + + @Override + int asInt() throws NumberFormatException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "double value lies outside the bounds of int"); + } + return Double.valueOf(value).intValue(); + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new IllegalStateException( + "double is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "double value lies outside the bounds of int"); + } + return new int[] { Double.valueOf(value).intValue() }; + } + + @Override + String asString() { + return String.valueOf(value); + } + + @Override + Double asObject() { + return value; + } + }
+ + static class MutableOptionLongValue + extends MutableOptionValue<Long> { + private final long value; + + MutableOptionLongValue(final long value) { + this.value = value; + } + + @Override + double asDouble() { + return Long.valueOf(value).doubleValue(); + } + + @Override + long asLong() throws NumberFormatException { + return value; + } + + @Override + int asInt() throws NumberFormatException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "long value lies outside the bounds of int"); + } + return Long.valueOf(value).intValue(); + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new IllegalStateException( + "long is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + if(value > Integer.MAX_VALUE || value < Integer.MIN_VALUE) { + throw new NumberFormatException( + "long value lies outside the bounds of int"); + } + return new int[] { Long.valueOf(value).intValue() }; + } + + @Override + String asString() { + return String.valueOf(value); + } + + @Override + Long asObject() { + return value; + } + }
+ + static class MutableOptionIntValue + extends MutableOptionValue<Integer> { + private final int value; + + MutableOptionIntValue(final int value) { + this.value = value; + } + + @Override + double asDouble() { + return Integer.valueOf(value).doubleValue(); + } + + @Override + long asLong() throws NumberFormatException { + return value; + } + + @Override + int asInt() throws NumberFormatException { + return value; + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new IllegalStateException("int is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + return new int[] { value }; + } + + @Override + String asString() { + return String.valueOf(value); + } + + @Override + Integer asObject() { + return value; + } + }
+ + static class MutableOptionBooleanValue + extends MutableOptionValue<Boolean> { + private final boolean value; + + MutableOptionBooleanValue(final boolean value) { + this.value = value; + } + + @Override + double asDouble() { + throw new NumberFormatException("boolean is not applicable as double"); + } + + @Override + long asLong() throws NumberFormatException { + throw new NumberFormatException("boolean is not applicable as Long"); + } + + @Override + int asInt() throws NumberFormatException { + throw new NumberFormatException("boolean is not applicable as int"); + } + + @Override + boolean asBoolean() { + return value; + } + + @Override + int[] asIntArray() throws IllegalStateException { + throw new IllegalStateException("boolean is not applicable as int[]"); + } + + @Override + String asString() { + return String.valueOf(value); + } + + @Override + Boolean asObject() { + return value; + } + }
+ + static class MutableOptionIntArrayValue + extends MutableOptionValueObject<int[]> { + MutableOptionIntArrayValue(final int[] value) { + super(value); + } + + @Override + double asDouble() { + throw new NumberFormatException("int[] is not applicable as double"); + } + + @Override + long asLong() throws NumberFormatException { + throw new NumberFormatException("int[] is not applicable as Long"); + } + + @Override + int asInt() throws NumberFormatException { + throw new NumberFormatException("int[] is not applicable as int"); + } + + @Override + boolean asBoolean() { + throw new NumberFormatException("int[] is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + return value; + } + + @Override + String asString() { + final StringBuilder builder = new StringBuilder(); + for(int i = 0; i < value.length; i++) { + builder.append(value[i]); + if(i + 1 < value.length) { + builder.append(INT_ARRAY_INT_SEPARATOR); + } + } + return builder.toString(); + } + }
+ + static class MutableOptionEnumValue<T extends Enum<T>> + extends MutableOptionValueObject<T> { + + MutableOptionEnumValue(final T value) { + super(value); + } + + @Override + double asDouble() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as double"); + } + + @Override + long asLong() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as long"); + } + + @Override + int asInt() throws NumberFormatException { + throw new NumberFormatException("Enum is not applicable as int"); + } + + @Override + boolean asBoolean() throws IllegalStateException { + throw new NumberFormatException("Enum is not applicable as boolean"); + } + + @Override + int[] asIntArray() throws IllegalStateException { + throw new NumberFormatException("Enum is not applicable as int[]"); + } + + @Override + String asString() { + return value.name(); + } + } + +}
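A rough sketch of the coercion contract these wrappers implement. The types are package-private, so a snippet like this would have to live inside org.rocksdb (e.g. a unit test); it only illustrates behaviour visible in the code above:

    package org.rocksdb;

    public class MutableOptionValueSketch {
      public static void main(final String[] args) {
        final MutableOptionValue<String> v = MutableOptionValue.fromString("42");
        System.out.println(v.asLong());    // 42 -- string values are parsed on demand
        System.out.println(v.asDouble());  // 42.0
        try {
          v.asIntArray();                  // strings never coerce to int[]
        } catch (final IllegalStateException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }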
" + + "Implementation is in Native code"); + } + + /** + * We override {@link RocksCallbackObject#disposeInternal()} + * as disposing of a native rocksdb::Comparator extension requires + * a slightly different approach as it is not really a RocksCallbackObject + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + private native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java b/src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java new file mode 100644 index 000000000..b97cf28b9 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java @@ -0,0 +1,172 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.StandardCopyOption; + +import org.rocksdb.util.Environment; + +/** + * This class is used to load the RocksDB shared library from within the jar. + * The shared library is extracted to a temp folder and loaded from there. + */ +public class NativeLibraryLoader { + //singleton + private static final NativeLibraryLoader instance = new NativeLibraryLoader(); + private static boolean initialized = false; + + private static final String sharedLibraryName = Environment.getSharedLibraryName("rocksdb"); + private static final String jniLibraryName = Environment.getJniLibraryName("rocksdb"); + private static final /* @Nullable */ String fallbackJniLibraryName = + Environment.getFallbackJniLibraryName("rocksdb"); + private static final String jniLibraryFileName = Environment.getJniLibraryFileName("rocksdb"); + private static final /* @Nullable */ String fallbackJniLibraryFileName = + Environment.getFallbackJniLibraryFileName("rocksdb"); + private static final String tempFilePrefix = "librocksdbjni"; + private static final String tempFileSuffix = Environment.getJniLibraryExtension(); + + /** + * Get a reference to the NativeLibraryLoader + * + * @return The NativeLibraryLoader + */ + public static NativeLibraryLoader getInstance() { + return instance; + } + + /** + * Firstly attempts to load the library from java.library.path, + * if that fails then it falls back to extracting + * the library from the classpath + * {@link org.rocksdb.NativeLibraryLoader#loadLibraryFromJar(java.lang.String)} + * + * @param tmpDir A temporary directory to use + * to copy the native library to when loading from the classpath. + * If null, or the empty string, we rely on Java's + * {@link java.io.File#createTempFile(String, String)} + * function to provide a temporary location. + * The temporary file will be registered for deletion + * on exit. + * + * @throws java.io.IOException if a filesystem operation fails. 
+ */ + public synchronized void loadLibrary(final String tmpDir) throws IOException { + try { + // try dynamic library + System.loadLibrary(sharedLibraryName); + return; + } catch (final UnsatisfiedLinkError ule) { + // ignore - try from static library + } + + try { + // try static library + System.loadLibrary(jniLibraryName); + return; + } catch (final UnsatisfiedLinkError ule) { + // ignore - then try static library fallback or from jar + } + + if (fallbackJniLibraryName != null) { + try { + // try static library fallback + System.loadLibrary(fallbackJniLibraryName); + return; + } catch (final UnsatisfiedLinkError ule) { + // ignore - then try from jar + } + } + + // try jar + loadLibraryFromJar(tmpDir); + } + + /** + * Attempts to extract the native RocksDB library + * from the classpath and load it + * + * @param tmpDir A temporary directory to use + * to copy the native library to. If null, + * or the empty string, we rely on Java's + * {@link java.io.File#createTempFile(String, String)} + * function to provide a temporary location. + * The temporary file will be registered for deletion + * on exit. + * + * @throws java.io.IOException if a filesystem operation fails. + */ + void loadLibraryFromJar(final String tmpDir) + throws IOException { + if (!initialized) { + System.load(loadLibraryFromJarToTemp(tmpDir).getAbsolutePath()); + initialized = true; + } + } + + File loadLibraryFromJarToTemp(final String tmpDir) + throws IOException { + InputStream is = null; + try { + // attempt to look up the static library in the jar file + String libraryFileName = jniLibraryFileName; + is = getClass().getClassLoader().getResourceAsStream(libraryFileName); + + if (is == null) { + // is there a fallback we can try + if (fallbackJniLibraryFileName == null) { + throw new RuntimeException(libraryFileName + " was not found inside JAR."); + } + + // attempt to look up the fallback static library in the jar file + libraryFileName = fallbackJniLibraryFileName; + is = getClass().getClassLoader().getResourceAsStream(libraryFileName); + if (is == null) { + throw new RuntimeException(libraryFileName + " was not found inside JAR."); + } + } + + // create a temporary file to copy the library to + final File temp; + if (tmpDir == null || tmpDir.isEmpty()) { + temp = File.createTempFile(tempFilePrefix, tempFileSuffix); + } else { + final File parentDir = new File(tmpDir); + if (!parentDir.exists()) { + throw new RuntimeException( + "Directory: " + parentDir.getAbsolutePath() + " does not exist!"); + } + temp = new File(parentDir, libraryFileName); + if (temp.exists() && !temp.delete()) { + throw new RuntimeException( + "File: " + temp.getAbsolutePath() + " already exists and cannot be removed."); + } + if (!temp.createNewFile()) { + throw new RuntimeException("File: " + temp.getAbsolutePath() + " could not be created."); + } + } + if (!temp.exists()) { + throw new RuntimeException("File " + temp.getAbsolutePath() + " does not exist."); + } else { + temp.deleteOnExit(); + } + + // copy the library from the Jar file to the temp destination + Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING); + + // return the temporary library file + return temp; + + } finally { + if (is != null) { + is.close(); + } + } + } + + /** + * Private constructor to disallow instantiation + */ + private NativeLibraryLoader() { + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java b/src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java new file mode 100644 index 000000000..6ac0a15a2 --- 
/dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/OperationStage.java @@ -0,0 +1,59 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The operation stage. + */ +public enum OperationStage { + STAGE_UNKNOWN((byte)0x0), + STAGE_FLUSH_RUN((byte)0x1), + STAGE_FLUSH_WRITE_L0((byte)0x2), + STAGE_COMPACTION_PREPARE((byte)0x3), + STAGE_COMPACTION_RUN((byte)0x4), + STAGE_COMPACTION_PROCESS_KV((byte)0x5), + STAGE_COMPACTION_INSTALL((byte)0x6), + STAGE_COMPACTION_SYNC_FILE((byte)0x7), + STAGE_PICK_MEMTABLES_TO_FLUSH((byte)0x8), + STAGE_MEMTABLE_ROLLBACK((byte)0x9), + STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS((byte)0xA); + + private final byte value; + + OperationStage(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + byte getValue() { + return value; + } + + /** + * Get the Operation stage from the internal representation value. + * + * @param value the internal representation value. + * + * @return the operation stage + * + * @throws IllegalArgumentException if the value does not match + * an OperationStage + */ + static OperationStage fromValue(final byte value) + throws IllegalArgumentException { + for (final OperationStage threadType : OperationStage.values()) { + if (threadType.value == value) { + return threadType; + } + } + throw new IllegalArgumentException( + "Unknown value for OperationStage: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java b/src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java new file mode 100644 index 000000000..7cc9b65cd --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/OperationType.java @@ -0,0 +1,54 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The type used to refer to a thread operation. + * + * A thread operation describes high-level action of a thread, + * examples include compaction and flush. + */ +public enum OperationType { + OP_UNKNOWN((byte)0x0), + OP_COMPACTION((byte)0x1), + OP_FLUSH((byte)0x2); + + private final byte value; + + OperationType(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + byte getValue() { + return value; + } + + /** + * Get the Operation type from the internal representation value. + * + * @param value the internal representation value. 
+ * + * @return the operation type + * + * @throws IllegalArgumentException if the value does not match + * an OperationType + */ + static OperationType fromValue(final byte value) + throws IllegalArgumentException { + for (final OperationType threadType : OperationType.values()) { + if (threadType.value == value) { + return threadType; + } + } + throw new IllegalArgumentException( + "Unknown value for OperationType: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java new file mode 100644 index 000000000..5a2e1f3ed --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionDB.java @@ -0,0 +1,226 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * Database with Transaction support. + */ +public class OptimisticTransactionDB extends RocksDB + implements TransactionalDB<OptimisticTransactionOptions> {
+ + /** + * Private constructor. + * + * @param nativeHandle The native handle of the C++ OptimisticTransactionDB + * object + */ + private OptimisticTransactionDB(final long nativeHandle) { + super(nativeHandle); + }
+ + /** + * Open an OptimisticTransactionDB similar to + * {@link RocksDB#open(Options, String)}. + * + * @param options {@link org.rocksdb.Options} instance. + * @param path the path to the rocksdb. + * + * @return an {@link OptimisticTransactionDB} instance on success, null if the + * specified {@link OptimisticTransactionDB} cannot be opened. + * + * @throws RocksDBException if an error occurs whilst opening the database. + */ + public static OptimisticTransactionDB open(final Options options, + final String path) throws RocksDBException { + final OptimisticTransactionDB otdb = new OptimisticTransactionDB(open( + options.nativeHandle_, path)); + + // when a non-default Options is used, keeping an Options reference + // in RocksDB prevents Java from garbage collecting it during the + // lifetime of the currently-created RocksDB. + otdb.storeOptionsInstance(options); + + return otdb; + }
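A minimal usage sketch for the simple open path above: open the database, begin a transaction, write and commit. The path and key/value bytes are illustrative, and the calling method is assumed to declare throws RocksDBException:

    try (final Options options = new Options().setCreateIfMissing(true);
         final OptimisticTransactionDB txnDb =
             OptimisticTransactionDB.open(options, "/tmp/otxn-db");
         final WriteOptions writeOptions = new WriteOptions();
         final Transaction txn = txnDb.beginTransaction(writeOptions)) {
      txn.put("key1".getBytes(), "value1".getBytes());
      txn.commit(); // optimistic conflict detection happens at commit time
    } // try-with-resources releases the native handles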
+ + /** + * Open an OptimisticTransactionDB similar to + * {@link RocksDB#open(DBOptions, String, List, List)}. + * + * @param dbOptions {@link org.rocksdb.DBOptions} instance. + * @param path the path to the rocksdb. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * + * @return an {@link OptimisticTransactionDB} instance on success, null if the + * specified {@link OptimisticTransactionDB} cannot be opened. + * + * @throws RocksDBException if an error occurs whilst opening the database. + */ + public static OptimisticTransactionDB open(final DBOptions dbOptions, + final String path, + final List<ColumnFamilyDescriptor> columnFamilyDescriptors, + final List<ColumnFamilyHandle> columnFamilyHandles) + throws RocksDBException { + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); + cfNames[i] = cfDescriptor.getName(); + cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_; + } + + final long[] handles = open(dbOptions.nativeHandle_, path, cfNames, + cfOptionHandles); + final OptimisticTransactionDB otdb = + new OptimisticTransactionDB(handles[0]); + + // when a non-default Options is used, keeping an Options reference + // in RocksDB prevents Java from garbage collecting it during the + // lifetime of the currently-created RocksDB. + otdb.storeOptionsInstance(dbOptions); + + for (int i = 1; i < handles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(otdb, handles[i])); + } + + return otdb; + }
+ + /** + * This is similar to {@link #close()} except that it + * throws an exception if any error occurs. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + * + * @throws RocksDBException if an error occurs whilst closing. + */ + public void closeE() throws RocksDBException { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } finally { + disposeInternal(); + } + } + }
+ + /** + * This is similar to {@link #closeE()} except that it + * silently ignores any errors. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + */ + @Override + public void close() { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } catch (final RocksDBException e) { + // silently ignore the error report + } finally { + disposeInternal(); + } + } + }
+ + @Override + public Transaction beginTransaction(final WriteOptions writeOptions) { + return new Transaction(this, beginTransaction(nativeHandle_, + writeOptions.nativeHandle_)); + } + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions, + final OptimisticTransactionOptions optimisticTransactionOptions) { + return new Transaction(this, beginTransaction(nativeHandle_, + writeOptions.nativeHandle_, + optimisticTransactionOptions.nativeHandle_)); + }
+ + // TODO(AR) consider having beginTransaction(... oldTransaction) set a + // reference count inside Transaction, so that we can always call + // Transaction#close but the object is only disposed when there are as many + // closes as beginTransaction.
+ + @Override + public Transaction beginTransaction(final WriteOptions writeOptions) { + return new Transaction(this, beginTransaction(nativeHandle_, + writeOptions.nativeHandle_)); + } + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions, + final OptimisticTransactionOptions optimisticTransactionOptions) { + return new Transaction(this, beginTransaction(nativeHandle_, + writeOptions.nativeHandle_, + optimisticTransactionOptions.nativeHandle_)); + } + + // TODO(AR) consider having beginTransaction(... oldTransaction) set a + // reference count inside Transaction, so that we can always call + // Transaction#close but the object is only disposed when there are as many + // closes as beginTransaction. This would make the try-with-resources + // paradigm easier for Java developers + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions, + final Transaction oldTransaction) { + final long jtxn_handle = beginTransaction_withOld(nativeHandle_, + writeOptions.nativeHandle_, oldTransaction.nativeHandle_); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(jtxn_handle == oldTransaction.nativeHandle_); + + return oldTransaction; + } + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions, + final OptimisticTransactionOptions optimisticTransactionOptions, + final Transaction oldTransaction) { + final long jtxn_handle = beginTransaction_withOld(nativeHandle_, + writeOptions.nativeHandle_, optimisticTransactionOptions.nativeHandle_, + oldTransaction.nativeHandle_); + + // RocksJava relies on the assumption that + // we do not allocate a new Transaction object + // when providing an old_txn + assert(jtxn_handle == oldTransaction.nativeHandle_); + + return oldTransaction; + } + + /** + * Get the underlying database that was opened. + * + * @return The underlying database that was opened. + */ + public RocksDB getBaseDB() { + final RocksDB db = new RocksDB(getBaseDB(nativeHandle_)); + db.disOwnNativeHandle(); + return db; + } + + @Override protected final native void disposeInternal(final long handle); + + protected static native long open(final long optionsHandle, + final String path) throws RocksDBException; + protected static native long[] open(final long handle, final String path, + final byte[][] columnFamilyNames, final long[] columnFamilyOptions); + private native static void closeDatabase(final long handle) + throws RocksDBException; + private native long beginTransaction(final long handle, + final long writeOptionsHandle); + private native long beginTransaction(final long handle, + final long writeOptionsHandle, + final long optimisticTransactionOptionsHandle); + private native long beginTransaction_withOld(final long handle, + final long writeOptionsHandle, final long oldTransactionHandle); + private native long beginTransaction_withOld(final long handle, + final long writeOptionsHandle, + final long optimisticTransactionOptionsHandle, + final long oldTransactionHandle); + private native long getBaseDB(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java new file mode 100644 index 000000000..250edf806 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/OptimisticTransactionOptions.java @@ -0,0 +1,53 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
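+ +// A minimal usage sketch (illustrative only; assumes "otdb" is an open +// OptimisticTransactionDB). Taking a snapshot at the start of the transaction +// means conflict checking at commit time is performed against that snapshot: +// +// try (final WriteOptions writeOptions = new WriteOptions(); +// final OptimisticTransactionOptions txnOptions = +// new OptimisticTransactionOptions().setSetSnapshot(true); +// final Transaction txn = +// otdb.beginTransaction(writeOptions, txnOptions)) { +// // read and write through txn, then call txn.commit() +// }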
+ +package org.rocksdb; + +public class OptimisticTransactionOptions extends RocksObject + implements TransactionalOptions<OptimisticTransactionOptions> { + + public OptimisticTransactionOptions() { + super(newOptimisticTransactionOptions()); + } + + @Override + public boolean isSetSnapshot() { + assert(isOwningHandle()); + return isSetSnapshot(nativeHandle_); + } + + @Override + public OptimisticTransactionOptions setSetSnapshot( + final boolean setSnapshot) { + assert(isOwningHandle()); + setSetSnapshot(nativeHandle_, setSnapshot); + return this; + } + + /** + * Should be set if the DB has a non-default comparator. + * See comment in + * {@link WriteBatchWithIndex#WriteBatchWithIndex(AbstractComparator, int, boolean)} + * constructor. + * + * @param comparator The comparator to use for the transaction. + * + * @return this OptimisticTransactionOptions instance + */ + public OptimisticTransactionOptions setComparator( + final AbstractComparator comparator) { + assert(isOwningHandle()); + setComparator(nativeHandle_, comparator.nativeHandle_); + return this; + } + + private native static long newOptimisticTransactionOptions(); + private native boolean isSetSnapshot(final long handle); + private native void setSetSnapshot(final long handle, + final boolean setSnapshot); + private native void setComparator(final long handle, + final long comparatorHandle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OptionString.java b/src/rocksdb/java/src/main/java/org/rocksdb/OptionString.java new file mode 100644 index 000000000..7f97827cb --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/OptionString.java @@ -0,0 +1,256 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
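+ +// OptionString parses RocksDB's textual option format: semicolon-separated +// key=value pairs, where a value may be a simple token, a colon-separated +// list, or a brace-wrapped nested set of options. A minimal sketch of the +// expected behaviour (the option string below is an assumed example): +// +// final List<OptionString.Entry> entries = OptionString.Parser.parse( +// "create_if_missing=true;compression_opts={level=3;strategy=0}"); +// // entries.get(0) is the simple pair; entries.get(1) holds a complex value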
+ +package org.rocksdb; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +public class OptionString { + private final static char kvPairSeparator = ';'; + private final static char kvSeparator = '='; + private final static char complexValueBegin = '{'; + private final static char complexValueEnd = '}'; + private final static char wrappedValueBegin = '{'; + private final static char wrappedValueEnd = '}'; + private final static char arrayValueSeparator = ':'; + + static class Value { + final List<String> list; + final List<Entry> complex; + + public Value(final List<String> list, final List<Entry> complex) { + this.list = list; + this.complex = complex; + } + + public boolean isList() { + return (this.list != null && this.complex == null); + } + + public static Value fromList(final List<String> list) { + return new Value(list, null); + } + + public static Value fromComplex(final List<Entry> complex) { + return new Value(null, complex); + } + + public String toString() { + final StringBuilder sb = new StringBuilder(); + if (isList()) { + for (final String item : list) { + sb.append(item).append(arrayValueSeparator); + } + // remove the final separator + if (sb.length() > 0) + sb.delete(sb.length() - 1, sb.length()); + } else { + sb.append('['); + for (final Entry entry : complex) { + sb.append(entry.toString()).append(';'); + } + sb.append(']'); + } + return sb.toString(); + } + } + + static class Entry { + public final String key; + public final Value value; + + private Entry(final String key, final Value value) { + this.key = key; + this.value = value; + } + + public String toString() { + return "" + key + "=" + value; + } + } + + static class Parser { + static class Exception extends RuntimeException { + public Exception(final String s) { + super(s); + } + } + + final String str; + final StringBuilder sb; + + private Parser(final String str) { + this.str = str; + this.sb = new StringBuilder(str); + } + + private void exception(final String message) { + final int pos = str.length() - sb.length(); + final int before = Math.min(pos, 64); + final int after = Math.min(64, str.length() - pos); + final String here = + str.substring(pos - before, pos) + "__*HERE*__" + str.substring(pos, pos + after); + + throw new Parser.Exception(message + " at [" + here + "]"); + } + + private void skipWhite() { + while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) { + sb.delete(0, 1); + } + } + + private char first() { + if (sb.length() == 0) + exception("Unexpected end of input"); + return sb.charAt(0); + } + + private char next() { + if (sb.length() == 0) + exception("Unexpected end of input"); + final char c = sb.charAt(0); + sb.delete(0, 1); + return c; + } + + private boolean hasNext() { + return (sb.length() > 0); + } + + private boolean is(final char c) { + return (sb.length() > 0 && sb.charAt(0) == c); + } + + private boolean isKeyChar() { + if (!hasNext()) + return false; + final char c = first(); + return (Character.isAlphabetic(c) || Character.isDigit(c) || "_".indexOf(c) != -1); + } + + private boolean isValueChar() { + if (!hasNext()) + return false; + final char c = first(); + return (Character.isAlphabetic(c) || Character.isDigit(c) || "_-+.[]".indexOf(c) != -1); + } + + private String parseKey() { + final StringBuilder sbKey = new StringBuilder(); + sbKey.append(next()); + while (isKeyChar()) { + sbKey.append(next()); + } + + return sbKey.toString(); + } + + private String parseSimpleValue() { + if (is(wrappedValueBegin)) { + next(); + final String result = parseSimpleValue();
+ if (!is(wrappedValueEnd)) { + exception("Expected to end a wrapped value with " + wrappedValueEnd); + } + next(); + + return result; + } else { + final StringBuilder sbValue = new StringBuilder(); + while (isValueChar()) sbValue.append(next()); + + return sbValue.toString(); + } + } + + private List<String> parseList() { + final List<String> list = new ArrayList<>(1); + while (true) { + list.add(parseSimpleValue()); + if (!is(arrayValueSeparator)) + break; + + next(); + } + + return list; + } + + private Entry parseOption() { + skipWhite(); + if (!isKeyChar()) { + exception("No valid key character(s) for key in key=value "); + } + final String key = parseKey(); + skipWhite(); + if (is(kvSeparator)) { + next(); + } else { + exception("Expected = separating key and value"); + } + skipWhite(); + final Value value = parseValue(); + return new Entry(key, value); + } + + private Value parseValue() { + skipWhite(); + if (is(complexValueBegin)) { + next(); + skipWhite(); + final Value value = Value.fromComplex(parseComplex()); + skipWhite(); + if (is(complexValueEnd)) { + next(); + skipWhite(); + } else { + exception("Expected } ending complex value"); + } + return value; + } else if (isValueChar()) { + return Value.fromList(parseList()); + } + + exception("No valid value character(s) for value in key=value"); + return null; + } + + private List<Entry> parseComplex() { + final List<Entry> entries = new ArrayList<>(); + + skipWhite(); + if (hasNext()) { + entries.add(parseOption()); + skipWhite(); + while (is(kvPairSeparator)) { + next(); + skipWhite(); + if (!isKeyChar()) { + // the separator was a terminator + break; + } + entries.add(parseOption()); + skipWhite(); + } + } + return entries; + } + + public static List<Entry> parse(final String str) { + Objects.requireNonNull(str); + + final Parser parser = new Parser(str); + final List<Entry> result = parser.parseComplex(); + if (parser.hasNext()) { + parser.exception("Unexpected end of parsing "); + } + + return result; + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Options.java b/src/rocksdb/java/src/main/java/org/rocksdb/Options.java new file mode 100644 index 000000000..1f1e5507a --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Options.java @@ -0,0 +1,2578 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.file.Paths; +import java.util.*; + +/** + * Options to control the behavior of a database. It will be used + * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). + * + * As a descendant of {@link AbstractNativeReference}, this class is {@link AutoCloseable} + * and will be automatically released if opened in the preamble of a try-with-resources block. + */ +public class Options extends RocksObject + implements DBOptionsInterface<Options>, + MutableDBOptionsInterface<Options>, + ColumnFamilyOptionsInterface<Options>, + MutableColumnFamilyOptionsInterface<Options> { + static { + RocksDB.loadLibrary(); + } + + /** + * Converts the input properties into an Options-style formatted string.
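+ * + * <p>A minimal sketch (illustrative property names only):</p> + * <pre>{@code + * final Properties props = new Properties(); + * props.setProperty("create_if_missing", "true"); + * props.setProperty("max_open_files", "-1"); + * // yields "create_if_missing=true;max_open_files=-1;" (order may vary) + * final String optionString = Options.getOptionStringFromProps(props); + * }</pre>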
+ * + * @param properties The set of properties to convert + * @return The Options-style representation of those properties. + */ + public static String getOptionStringFromProps(final Properties properties) { + if (properties == null || properties.size() == 0) { + throw new IllegalArgumentException("Properties value must contain at least one value."); + } + StringBuilder stringBuilder = new StringBuilder(); + for (final String name : properties.stringPropertyNames()) { + stringBuilder.append(name); + stringBuilder.append("="); + stringBuilder.append(properties.getProperty(name)); + stringBuilder.append(";"); + } + return stringBuilder.toString(); + } + + /** + * Construct options for opening a RocksDB. + * + * This constructor will create (by allocating a block of memory) + * an {@code rocksdb::Options} in the c++ side. + */ + public Options() { + super(newOptions()); + env_ = Env.getDefault(); + } + + /** + * Construct options for opening a RocksDB. Reusing database options + * and column family options. + * + * @param dbOptions {@link org.rocksdb.DBOptions} instance + * @param columnFamilyOptions {@link org.rocksdb.ColumnFamilyOptions} + * instance + */ + public Options(final DBOptions dbOptions, + final ColumnFamilyOptions columnFamilyOptions) { + super(newOptions(dbOptions.nativeHandle_, + columnFamilyOptions.nativeHandle_)); + env_ = Env.getDefault(); + } + + /** + * Copy constructor for Options. + * + * NOTE: This does a shallow copy, which means comparator, merge_operator + * and other pointers will be cloned! + * + * @param other The Options to copy. + */ + public Options(Options other) { + super(copyOptions(other.nativeHandle_)); + this.env_ = other.env_; + this.memTableConfig_ = other.memTableConfig_; + this.tableFormatConfig_ = other.tableFormatConfig_; + this.rateLimiter_ = other.rateLimiter_; + this.comparator_ = other.comparator_; + this.compactionFilter_ = other.compactionFilter_; + this.compactionFilterFactory_ = other.compactionFilterFactory_; + this.compactionOptionsUniversal_ = other.compactionOptionsUniversal_; + this.compactionOptionsFIFO_ = other.compactionOptionsFIFO_; + this.compressionOptions_ = other.compressionOptions_; + this.rowCache_ = other.rowCache_; + this.writeBufferManager_ = other.writeBufferManager_; + this.compactionThreadLimiter_ = other.compactionThreadLimiter_; + this.bottommostCompressionOptions_ = other.bottommostCompressionOptions_; + this.walFilter_ = other.walFilter_; + this.sstPartitionerFactory_ = other.sstPartitionerFactory_; + } + + @Override + public Options setIncreaseParallelism(final int totalThreads) { + assert(isOwningHandle()); + setIncreaseParallelism(nativeHandle_, totalThreads); + return this; + } + + @Override + public Options setCreateIfMissing(final boolean flag) { + assert(isOwningHandle()); + setCreateIfMissing(nativeHandle_, flag); + return this; + } + + @Override + public Options setCreateMissingColumnFamilies(final boolean flag) { + assert(isOwningHandle()); + setCreateMissingColumnFamilies(nativeHandle_, flag); + return this; + } + + @Override + public Options setEnv(final Env env) { + assert(isOwningHandle()); + setEnv(nativeHandle_, env.nativeHandle_); + env_ = env; + return this; + } + + @Override + public Env getEnv() { + return env_; + } + + /** + * <p>Set appropriate parameters for bulk loading. + * The reason that this is a function that returns "this" instead of a + * constructor is to enable chaining of multiple similar calls in the future. + * </p> + * <p> + * All data will be in level 0 without any automatic compaction. + * It's recommended to manually call CompactRange(NULL, NULL) before reading + * from the database, because otherwise the read can be very slow. + * </p>
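+ * + * <p>A minimal bulk-load sketch (illustrative only; the path is an assumed + * example, and the enclosing code must handle RocksDBException):</p> + * <pre>{@code + * try (final Options options = new Options() + * .setCreateIfMissing(true) + * .prepareForBulkLoad(); + * final RocksDB db = RocksDB.open(options, "/tmp/bulk-load-example")) { + * // ingest the data, then compact before serving reads + * db.compactRange(); + * } + * }</pre>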
+ * + * @return the instance of the current Options. + */ + public Options prepareForBulkLoad() { + prepareForBulkLoad(nativeHandle_); + return this; + } + + @Override + public boolean createIfMissing() { + assert(isOwningHandle()); + return createIfMissing(nativeHandle_); + } + + @Override + public boolean createMissingColumnFamilies() { + assert(isOwningHandle()); + return createMissingColumnFamilies(nativeHandle_); + } + + @Override + public Options oldDefaults(final int majorVersion, final int minorVersion) { + oldDefaults(nativeHandle_, majorVersion, minorVersion); + return this; + } + + @Override + public Options optimizeForSmallDb() { + optimizeForSmallDb(nativeHandle_); + return this; + } + + @Override + public Options optimizeForSmallDb(final Cache cache) { + optimizeForSmallDb(nativeHandle_, cache.getNativeHandle()); + return this; + } + + @Override + public Options optimizeForPointLookup( + long blockCacheSizeMb) { + optimizeForPointLookup(nativeHandle_, + blockCacheSizeMb); + return this; + } + + @Override + public Options optimizeLevelStyleCompaction() { + optimizeLevelStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public Options optimizeLevelStyleCompaction( + long memtableMemoryBudget) { + optimizeLevelStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public Options optimizeUniversalStyleCompaction() { + optimizeUniversalStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public Options optimizeUniversalStyleCompaction( + final long memtableMemoryBudget) { + optimizeUniversalStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public Options setComparator(final BuiltinComparator builtinComparator) { + assert(isOwningHandle()); + setComparatorHandle(nativeHandle_, builtinComparator.ordinal()); + return this; + } + + @Override + public Options setComparator( + final AbstractComparator comparator) { + assert(isOwningHandle()); + setComparatorHandle(nativeHandle_, comparator.nativeHandle_, + comparator.getComparatorType().getValue()); + comparator_ = comparator; + return this; + } + + @Override + public Options setMergeOperatorName(final String name) { + assert(isOwningHandle()); + if (name == null) { + throw new IllegalArgumentException( + "Merge operator name must not be null."); + } + setMergeOperatorName(nativeHandle_, name); + return this; + } + + @Override + public Options setMergeOperator(final MergeOperator mergeOperator) { + setMergeOperator(nativeHandle_, mergeOperator.nativeHandle_); + return this; + } + + @Override + public Options setCompactionFilter( + final AbstractCompactionFilter<? super AbstractSlice<?>> + compactionFilter) { + setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_); + compactionFilter_ = compactionFilter; + return this; + } + + @Override + public AbstractCompactionFilter<? super AbstractSlice<?>> compactionFilter() { + assert (isOwningHandle()); + return compactionFilter_; + } + + @Override + public Options setCompactionFilterFactory(final AbstractCompactionFilterFactory<? super AbstractCompactionFilter<?>> compactionFilterFactory) { + assert (isOwningHandle()); + setCompactionFilterFactoryHandle(nativeHandle_, compactionFilterFactory.nativeHandle_); + compactionFilterFactory_ = compactionFilterFactory; + return this; + } + + @Override + public AbstractCompactionFilterFactory<? super AbstractCompactionFilter<?>> compactionFilterFactory() { + assert (isOwningHandle()); + return compactionFilterFactory_; + } + + @Override + public Options
setWriteBufferSize(final long writeBufferSize) { + assert(isOwningHandle()); + setWriteBufferSize(nativeHandle_, writeBufferSize); + return this; + } + + @Override + public long writeBufferSize() { + assert(isOwningHandle()); + return writeBufferSize(nativeHandle_); + } + + @Override + public Options setMaxWriteBufferNumber(final int maxWriteBufferNumber) { + assert(isOwningHandle()); + setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber); + return this; + } + + @Override + public int maxWriteBufferNumber() { + assert(isOwningHandle()); + return maxWriteBufferNumber(nativeHandle_); + } + + @Override + public boolean errorIfExists() { + assert(isOwningHandle()); + return errorIfExists(nativeHandle_); + } + + @Override + public Options setErrorIfExists(final boolean errorIfExists) { + assert(isOwningHandle()); + setErrorIfExists(nativeHandle_, errorIfExists); + return this; + } + + @Override + public boolean paranoidChecks() { + assert(isOwningHandle()); + return paranoidChecks(nativeHandle_); + } + + @Override + public Options setParanoidChecks(final boolean paranoidChecks) { + assert(isOwningHandle()); + setParanoidChecks(nativeHandle_, paranoidChecks); + return this; + } + + @Override + public int maxOpenFiles() { + assert(isOwningHandle()); + return maxOpenFiles(nativeHandle_); + } + + @Override + public Options setMaxFileOpeningThreads(final int maxFileOpeningThreads) { + assert(isOwningHandle()); + setMaxFileOpeningThreads(nativeHandle_, maxFileOpeningThreads); + return this; + } + + @Override + public int maxFileOpeningThreads() { + assert(isOwningHandle()); + return maxFileOpeningThreads(nativeHandle_); + } + + @Override + public Options setMaxTotalWalSize(final long maxTotalWalSize) { + assert(isOwningHandle()); + setMaxTotalWalSize(nativeHandle_, maxTotalWalSize); + return this; + } + + @Override + public long maxTotalWalSize() { + assert(isOwningHandle()); + return maxTotalWalSize(nativeHandle_); + } + + @Override + public Options setMaxOpenFiles(final int maxOpenFiles) { + assert(isOwningHandle()); + setMaxOpenFiles(nativeHandle_, maxOpenFiles); + return this; + } + + @Override + public boolean useFsync() { + assert(isOwningHandle()); + return useFsync(nativeHandle_); + } + + @Override + public Options setUseFsync(final boolean useFsync) { + assert(isOwningHandle()); + setUseFsync(nativeHandle_, useFsync); + return this; + } + + @Override + public Options setDbPaths(final Collection<DbPath> dbPaths) { + assert(isOwningHandle()); + + final int len = dbPaths.size(); + final String paths[] = new String[len]; + final long targetSizes[] = new long[len]; + + int i = 0; + for(final DbPath dbPath : dbPaths) { + paths[i] = dbPath.path.toString(); + targetSizes[i] = dbPath.targetSize; + i++; + } + setDbPaths(nativeHandle_, paths, targetSizes); + return this; + } + + @Override + public List<DbPath> dbPaths() { + final int len = (int)dbPathsLen(nativeHandle_); + if(len == 0) { + return Collections.emptyList(); + } else { + final String paths[] = new String[len]; + final long targetSizes[] = new long[len]; + + dbPaths(nativeHandle_, paths, targetSizes); + + final List<DbPath> dbPaths = new ArrayList<>(); + for(int i = 0; i < len; i++) { + dbPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i])); + } + return dbPaths; + } + } + + @Override + public String dbLogDir() { + assert(isOwningHandle()); + return dbLogDir(nativeHandle_); + } + + @Override + public Options setDbLogDir(final String dbLogDir) { + assert(isOwningHandle()); + setDbLogDir(nativeHandle_, dbLogDir); + return this; + } + + @Override +
public String walDir() { + assert(isOwningHandle()); + return walDir(nativeHandle_); + } + + @Override + public Options setWalDir(final String walDir) { + assert(isOwningHandle()); + setWalDir(nativeHandle_, walDir); + return this; + } + + @Override + public long deleteObsoleteFilesPeriodMicros() { + assert(isOwningHandle()); + return deleteObsoleteFilesPeriodMicros(nativeHandle_); + } + + @Override + public Options setDeleteObsoleteFilesPeriodMicros( + final long micros) { + assert(isOwningHandle()); + setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros); + return this; + } + + @Override + @Deprecated + public int maxBackgroundCompactions() { + assert(isOwningHandle()); + return maxBackgroundCompactions(nativeHandle_); + } + + @Override + public Options setStatistics(final Statistics statistics) { + assert(isOwningHandle()); + setStatistics(nativeHandle_, statistics.nativeHandle_); + return this; + } + + @Override + public Statistics statistics() { + assert(isOwningHandle()); + final long statisticsNativeHandle = statistics(nativeHandle_); + if(statisticsNativeHandle == 0) { + return null; + } else { + return new Statistics(statisticsNativeHandle); + } + } + + @Override + @Deprecated + public Options setMaxBackgroundCompactions( + final int maxBackgroundCompactions) { + assert(isOwningHandle()); + setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); + return this; + } + + @Override + public Options setMaxSubcompactions(final int maxSubcompactions) { + assert(isOwningHandle()); + setMaxSubcompactions(nativeHandle_, maxSubcompactions); + return this; + } + + @Override + public int maxSubcompactions() { + assert(isOwningHandle()); + return maxSubcompactions(nativeHandle_); + } + + @Override + @Deprecated + public int maxBackgroundFlushes() { + assert(isOwningHandle()); + return maxBackgroundFlushes(nativeHandle_); + } + + @Override + @Deprecated + public Options setMaxBackgroundFlushes( + final int maxBackgroundFlushes) { + assert(isOwningHandle()); + setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes); + return this; + } + + @Override + public int maxBackgroundJobs() { + assert(isOwningHandle()); + return maxBackgroundJobs(nativeHandle_); + } + + @Override + public Options setMaxBackgroundJobs(final int maxBackgroundJobs) { + assert(isOwningHandle()); + setMaxBackgroundJobs(nativeHandle_, maxBackgroundJobs); + return this; + } + + @Override + public long maxLogFileSize() { + assert(isOwningHandle()); + return maxLogFileSize(nativeHandle_); + } + + @Override + public Options setMaxLogFileSize(final long maxLogFileSize) { + assert(isOwningHandle()); + setMaxLogFileSize(nativeHandle_, maxLogFileSize); + return this; + } + + @Override + public long logFileTimeToRoll() { + assert(isOwningHandle()); + return logFileTimeToRoll(nativeHandle_); + } + + @Override + public Options setLogFileTimeToRoll(final long logFileTimeToRoll) { + assert(isOwningHandle()); + setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll); + return this; + } + + @Override + public long keepLogFileNum() { + assert(isOwningHandle()); + return keepLogFileNum(nativeHandle_); + } + + @Override + public Options setKeepLogFileNum(final long keepLogFileNum) { + assert(isOwningHandle()); + setKeepLogFileNum(nativeHandle_, keepLogFileNum); + return this; + } + + + @Override + public Options setRecycleLogFileNum(final long recycleLogFileNum) { + assert(isOwningHandle()); + setRecycleLogFileNum(nativeHandle_, recycleLogFileNum); + return this; + } + + @Override + public long recycleLogFileNum() { + 
assert(isOwningHandle()); + return recycleLogFileNum(nativeHandle_); + } + + @Override + public long maxManifestFileSize() { + assert(isOwningHandle()); + return maxManifestFileSize(nativeHandle_); + } + + @Override + public Options setMaxManifestFileSize( + final long maxManifestFileSize) { + assert(isOwningHandle()); + setMaxManifestFileSize(nativeHandle_, maxManifestFileSize); + return this; + } + + @Override + public Options setMaxTableFilesSizeFIFO( + final long maxTableFilesSize) { + assert(maxTableFilesSize > 0); // unsigned native type + assert(isOwningHandle()); + setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize); + return this; + } + + @Override + public long maxTableFilesSizeFIFO() { + return maxTableFilesSizeFIFO(nativeHandle_); + } + + @Override + public int tableCacheNumshardbits() { + assert(isOwningHandle()); + return tableCacheNumshardbits(nativeHandle_); + } + + @Override + public Options setTableCacheNumshardbits( + final int tableCacheNumshardbits) { + assert(isOwningHandle()); + setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits); + return this; + } + + @Override + public long walTtlSeconds() { + assert(isOwningHandle()); + return walTtlSeconds(nativeHandle_); + } + + @Override + public Options setWalTtlSeconds(final long walTtlSeconds) { + assert(isOwningHandle()); + setWalTtlSeconds(nativeHandle_, walTtlSeconds); + return this; + } + + @Override + public long walSizeLimitMB() { + assert(isOwningHandle()); + return walSizeLimitMB(nativeHandle_); + } + + @Override + public Options setMaxWriteBatchGroupSizeBytes(long maxWriteBatchGroupSizeBytes) { + setMaxWriteBatchGroupSizeBytes(nativeHandle_, maxWriteBatchGroupSizeBytes); + return this; + } + + @Override + public long maxWriteBatchGroupSizeBytes() { + assert (isOwningHandle()); + return maxWriteBatchGroupSizeBytes(nativeHandle_); + } + + @Override + public Options setWalSizeLimitMB(final long sizeLimitMB) { + assert(isOwningHandle()); + setWalSizeLimitMB(nativeHandle_, sizeLimitMB); + return this; + } + + @Override + public long manifestPreallocationSize() { + assert(isOwningHandle()); + return manifestPreallocationSize(nativeHandle_); + } + + @Override + public Options setManifestPreallocationSize(final long size) { + assert(isOwningHandle()); + setManifestPreallocationSize(nativeHandle_, size); + return this; + } + + @Override + public Options setUseDirectReads(final boolean useDirectReads) { + assert(isOwningHandle()); + setUseDirectReads(nativeHandle_, useDirectReads); + return this; + } + + @Override + public boolean useDirectReads() { + assert(isOwningHandle()); + return useDirectReads(nativeHandle_); + } + + @Override + public Options setUseDirectIoForFlushAndCompaction( + final boolean useDirectIoForFlushAndCompaction) { + assert(isOwningHandle()); + setUseDirectIoForFlushAndCompaction(nativeHandle_, useDirectIoForFlushAndCompaction); + return this; + } + + @Override + public boolean useDirectIoForFlushAndCompaction() { + assert(isOwningHandle()); + return useDirectIoForFlushAndCompaction(nativeHandle_); + } + + @Override + public Options setAllowFAllocate(final boolean allowFAllocate) { + assert(isOwningHandle()); + setAllowFAllocate(nativeHandle_, allowFAllocate); + return this; + } + + @Override + public boolean allowFAllocate() { + assert(isOwningHandle()); + return allowFAllocate(nativeHandle_); + } + + @Override + public boolean allowMmapReads() { + assert(isOwningHandle()); + return allowMmapReads(nativeHandle_); + } + + @Override + public Options setAllowMmapReads(final boolean 
allowMmapReads) { + assert(isOwningHandle()); + setAllowMmapReads(nativeHandle_, allowMmapReads); + return this; + } + + @Override + public boolean allowMmapWrites() { + assert(isOwningHandle()); + return allowMmapWrites(nativeHandle_); + } + + @Override + public Options setAllowMmapWrites(final boolean allowMmapWrites) { + assert(isOwningHandle()); + setAllowMmapWrites(nativeHandle_, allowMmapWrites); + return this; + } + + @Override + public boolean isFdCloseOnExec() { + assert(isOwningHandle()); + return isFdCloseOnExec(nativeHandle_); + } + + @Override + public Options setIsFdCloseOnExec(final boolean isFdCloseOnExec) { + assert(isOwningHandle()); + setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec); + return this; + } + + @Override + public int statsDumpPeriodSec() { + assert(isOwningHandle()); + return statsDumpPeriodSec(nativeHandle_); + } + + @Override + public Options setStatsDumpPeriodSec(final int statsDumpPeriodSec) { + assert(isOwningHandle()); + setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec); + return this; + } + + @Override + public Options setStatsPersistPeriodSec( + final int statsPersistPeriodSec) { + assert(isOwningHandle()); + setStatsPersistPeriodSec(nativeHandle_, statsPersistPeriodSec); + return this; + } + + @Override + public int statsPersistPeriodSec() { + assert(isOwningHandle()); + return statsPersistPeriodSec(nativeHandle_); + } + + @Override + public Options setStatsHistoryBufferSize( + final long statsHistoryBufferSize) { + assert(isOwningHandle()); + setStatsHistoryBufferSize(nativeHandle_, statsHistoryBufferSize); + return this; + } + + @Override + public long statsHistoryBufferSize() { + assert(isOwningHandle()); + return statsHistoryBufferSize(nativeHandle_); + } + + @Override + public boolean adviseRandomOnOpen() { + return adviseRandomOnOpen(nativeHandle_); + } + + @Override + public Options setAdviseRandomOnOpen(final boolean adviseRandomOnOpen) { + assert(isOwningHandle()); + setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen); + return this; + } + + @Override + public Options setDbWriteBufferSize(final long dbWriteBufferSize) { + assert(isOwningHandle()); + setDbWriteBufferSize(nativeHandle_, dbWriteBufferSize); + return this; + } + + @Override + public Options setWriteBufferManager(final WriteBufferManager writeBufferManager) { + assert(isOwningHandle()); + setWriteBufferManager(nativeHandle_, writeBufferManager.nativeHandle_); + this.writeBufferManager_ = writeBufferManager; + return this; + } + + @Override + public WriteBufferManager writeBufferManager() { + assert(isOwningHandle()); + return this.writeBufferManager_; + } + + @Override + public long dbWriteBufferSize() { + assert(isOwningHandle()); + return dbWriteBufferSize(nativeHandle_); + } + + @Override + public Options setAccessHintOnCompactionStart(final AccessHint accessHint) { + assert(isOwningHandle()); + setAccessHintOnCompactionStart(nativeHandle_, accessHint.getValue()); + return this; + } + + @Override + public AccessHint accessHintOnCompactionStart() { + assert(isOwningHandle()); + return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_)); + } + + @Override + public Options setCompactionReadaheadSize(final long compactionReadaheadSize) { + assert(isOwningHandle()); + setCompactionReadaheadSize(nativeHandle_, compactionReadaheadSize); + return this; + } + + @Override + public long compactionReadaheadSize() { + assert(isOwningHandle()); + return compactionReadaheadSize(nativeHandle_); + } + + @Override + public Options 
setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) { + assert(isOwningHandle()); + setRandomAccessMaxBufferSize(nativeHandle_, randomAccessMaxBufferSize); + return this; + } + + @Override + public long randomAccessMaxBufferSize() { + assert(isOwningHandle()); + return randomAccessMaxBufferSize(nativeHandle_); + } + + @Override + public Options setWritableFileMaxBufferSize(final long writableFileMaxBufferSize) { + assert(isOwningHandle()); + setWritableFileMaxBufferSize(nativeHandle_, writableFileMaxBufferSize); + return this; + } + + @Override + public long writableFileMaxBufferSize() { + assert(isOwningHandle()); + return writableFileMaxBufferSize(nativeHandle_); + } + + @Override + public boolean useAdaptiveMutex() { + assert(isOwningHandle()); + return useAdaptiveMutex(nativeHandle_); + } + + @Override + public Options setUseAdaptiveMutex(final boolean useAdaptiveMutex) { + assert(isOwningHandle()); + setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); + return this; + } + + @Override + public long bytesPerSync() { + return bytesPerSync(nativeHandle_); + } + + @Override + public Options setBytesPerSync(final long bytesPerSync) { + assert(isOwningHandle()); + setBytesPerSync(nativeHandle_, bytesPerSync); + return this; + } + + @Override + public Options setWalBytesPerSync(final long walBytesPerSync) { + assert(isOwningHandle()); + setWalBytesPerSync(nativeHandle_, walBytesPerSync); + return this; + } + + @Override + public long walBytesPerSync() { + assert(isOwningHandle()); + return walBytesPerSync(nativeHandle_); + } + + @Override + public Options setStrictBytesPerSync(final boolean strictBytesPerSync) { + assert(isOwningHandle()); + setStrictBytesPerSync(nativeHandle_, strictBytesPerSync); + return this; + } + + @Override + public boolean strictBytesPerSync() { + assert(isOwningHandle()); + return strictBytesPerSync(nativeHandle_); + } + + @Override + public Options setListeners(final List<AbstractEventListener> listeners) { + assert (isOwningHandle()); + setEventListeners(nativeHandle_, RocksCallbackObject.toNativeHandleList(listeners)); + return this; + } + + @Override + public List<AbstractEventListener> listeners() { + assert (isOwningHandle()); + return Arrays.asList(eventListeners(nativeHandle_)); + } + + @Override + public Options setEnableThreadTracking(final boolean enableThreadTracking) { + assert(isOwningHandle()); + setEnableThreadTracking(nativeHandle_, enableThreadTracking); + return this; + } + + @Override + public boolean enableThreadTracking() { + assert(isOwningHandle()); + return enableThreadTracking(nativeHandle_); + } + + @Override + public Options setDelayedWriteRate(final long delayedWriteRate) { + assert(isOwningHandle()); + setDelayedWriteRate(nativeHandle_, delayedWriteRate); + return this; + } + + @Override + public long delayedWriteRate() { + return delayedWriteRate(nativeHandle_); + } + + @Override + public Options setEnablePipelinedWrite(final boolean enablePipelinedWrite) { + setEnablePipelinedWrite(nativeHandle_, enablePipelinedWrite); + return this; + } + + @Override + public boolean enablePipelinedWrite() { + return enablePipelinedWrite(nativeHandle_); + } + + @Override + public Options setUnorderedWrite(final boolean unorderedWrite) { + setUnorderedWrite(nativeHandle_, unorderedWrite); + return this; + } + + @Override + public boolean unorderedWrite() { + return unorderedWrite(nativeHandle_); + } + + @Override + public Options setAllowConcurrentMemtableWrite( + final boolean allowConcurrentMemtableWrite) { + setAllowConcurrentMemtableWrite(nativeHandle_, +
allowConcurrentMemtableWrite); + return this; + } + + @Override + public boolean allowConcurrentMemtableWrite() { + return allowConcurrentMemtableWrite(nativeHandle_); + } + + @Override + public Options setEnableWriteThreadAdaptiveYield( + final boolean enableWriteThreadAdaptiveYield) { + setEnableWriteThreadAdaptiveYield(nativeHandle_, + enableWriteThreadAdaptiveYield); + return this; + } + + @Override + public boolean enableWriteThreadAdaptiveYield() { + return enableWriteThreadAdaptiveYield(nativeHandle_); + } + + @Override + public Options setWriteThreadMaxYieldUsec(final long writeThreadMaxYieldUsec) { + setWriteThreadMaxYieldUsec(nativeHandle_, writeThreadMaxYieldUsec); + return this; + } + + @Override + public long writeThreadMaxYieldUsec() { + return writeThreadMaxYieldUsec(nativeHandle_); + } + + @Override + public Options setWriteThreadSlowYieldUsec(final long writeThreadSlowYieldUsec) { + setWriteThreadSlowYieldUsec(nativeHandle_, writeThreadSlowYieldUsec); + return this; + } + + @Override + public long writeThreadSlowYieldUsec() { + return writeThreadSlowYieldUsec(nativeHandle_); + } + + @Override + public Options setSkipStatsUpdateOnDbOpen(final boolean skipStatsUpdateOnDbOpen) { + assert(isOwningHandle()); + setSkipStatsUpdateOnDbOpen(nativeHandle_, skipStatsUpdateOnDbOpen); + return this; + } + + @Override + public boolean skipStatsUpdateOnDbOpen() { + assert(isOwningHandle()); + return skipStatsUpdateOnDbOpen(nativeHandle_); + } + + @Override + public Options setSkipCheckingSstFileSizesOnDbOpen(boolean skipCheckingSstFileSizesOnDbOpen) { + setSkipCheckingSstFileSizesOnDbOpen(nativeHandle_, skipCheckingSstFileSizesOnDbOpen); + return this; + } + + @Override + public boolean skipCheckingSstFileSizesOnDbOpen() { + assert (isOwningHandle()); + return skipCheckingSstFileSizesOnDbOpen(nativeHandle_); + } + + @Override + public Options setWalRecoveryMode(final WALRecoveryMode walRecoveryMode) { + assert(isOwningHandle()); + setWalRecoveryMode(nativeHandle_, walRecoveryMode.getValue()); + return this; + } + + @Override + public WALRecoveryMode walRecoveryMode() { + assert(isOwningHandle()); + return WALRecoveryMode.getWALRecoveryMode(walRecoveryMode(nativeHandle_)); + } + + @Override + public Options setAllow2pc(final boolean allow2pc) { + assert(isOwningHandle()); + setAllow2pc(nativeHandle_, allow2pc); + return this; + } + + @Override + public boolean allow2pc() { + assert(isOwningHandle()); + return allow2pc(nativeHandle_); + } + + @Override + public Options setRowCache(final Cache rowCache) { + assert(isOwningHandle()); + setRowCache(nativeHandle_, rowCache.nativeHandle_); + this.rowCache_ = rowCache; + return this; + } + + @Override + public Cache rowCache() { + assert(isOwningHandle()); + return this.rowCache_; + } + + @Override + public Options setWalFilter(final AbstractWalFilter walFilter) { + assert(isOwningHandle()); + setWalFilter(nativeHandle_, walFilter.nativeHandle_); + this.walFilter_ = walFilter; + return this; + } + + @Override + public WalFilter walFilter() { + assert(isOwningHandle()); + return this.walFilter_; + } + + @Override + public Options setFailIfOptionsFileError(final boolean failIfOptionsFileError) { + assert(isOwningHandle()); + setFailIfOptionsFileError(nativeHandle_, failIfOptionsFileError); + return this; + } + + @Override + public boolean failIfOptionsFileError() { + assert(isOwningHandle()); + return failIfOptionsFileError(nativeHandle_); + } + + @Override + public Options setDumpMallocStats(final boolean dumpMallocStats) { + 
assert(isOwningHandle()); + setDumpMallocStats(nativeHandle_, dumpMallocStats); + return this; + } + + @Override + public boolean dumpMallocStats() { + assert(isOwningHandle()); + return dumpMallocStats(nativeHandle_); + } + + @Override + public Options setAvoidFlushDuringRecovery(final boolean avoidFlushDuringRecovery) { + assert(isOwningHandle()); + setAvoidFlushDuringRecovery(nativeHandle_, avoidFlushDuringRecovery); + return this; + } + + @Override + public boolean avoidFlushDuringRecovery() { + assert(isOwningHandle()); + return avoidFlushDuringRecovery(nativeHandle_); + } + + @Override + public Options setAvoidFlushDuringShutdown(final boolean avoidFlushDuringShutdown) { + assert(isOwningHandle()); + setAvoidFlushDuringShutdown(nativeHandle_, avoidFlushDuringShutdown); + return this; + } + + @Override + public boolean avoidFlushDuringShutdown() { + assert(isOwningHandle()); + return avoidFlushDuringShutdown(nativeHandle_); + } + + @Override + public Options setAllowIngestBehind(final boolean allowIngestBehind) { + assert(isOwningHandle()); + setAllowIngestBehind(nativeHandle_, allowIngestBehind); + return this; + } + + @Override + public boolean allowIngestBehind() { + assert(isOwningHandle()); + return allowIngestBehind(nativeHandle_); + } + + @Override + public Options setTwoWriteQueues(final boolean twoWriteQueues) { + assert(isOwningHandle()); + setTwoWriteQueues(nativeHandle_, twoWriteQueues); + return this; + } + + @Override + public boolean twoWriteQueues() { + assert(isOwningHandle()); + return twoWriteQueues(nativeHandle_); + } + + @Override + public Options setManualWalFlush(final boolean manualWalFlush) { + assert(isOwningHandle()); + setManualWalFlush(nativeHandle_, manualWalFlush); + return this; + } + + @Override + public boolean manualWalFlush() { + assert(isOwningHandle()); + return manualWalFlush(nativeHandle_); + } + + @Override + public MemTableConfig memTableConfig() { + return this.memTableConfig_; + } + + @Override + public Options setMemTableConfig(final MemTableConfig config) { + memTableConfig_ = config; + setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle()); + return this; + } + + @Override + public Options setRateLimiter(final RateLimiter rateLimiter) { + assert(isOwningHandle()); + rateLimiter_ = rateLimiter; + setRateLimiter(nativeHandle_, rateLimiter.nativeHandle_); + return this; + } + + @Override + public Options setSstFileManager(final SstFileManager sstFileManager) { + assert(isOwningHandle()); + setSstFileManager(nativeHandle_, sstFileManager.nativeHandle_); + return this; + } + + @Override + public Options setLogger(final Logger logger) { + assert(isOwningHandle()); + setLogger(nativeHandle_, logger.nativeHandle_); + return this; + } + + @Override + public Options setInfoLogLevel(final InfoLogLevel infoLogLevel) { + assert(isOwningHandle()); + setInfoLogLevel(nativeHandle_, infoLogLevel.getValue()); + return this; + } + + @Override + public InfoLogLevel infoLogLevel() { + assert(isOwningHandle()); + return InfoLogLevel.getInfoLogLevel( + infoLogLevel(nativeHandle_)); + } + + @Override + public String memTableFactoryName() { + assert(isOwningHandle()); + return memTableFactoryName(nativeHandle_); + } + + @Override + public TableFormatConfig tableFormatConfig() { + return this.tableFormatConfig_; + } + + @Override + public Options setTableFormatConfig(final TableFormatConfig config) { + tableFormatConfig_ = config; + setTableFactory(nativeHandle_, config.newTableFactoryHandle()); + return this; + } + + @Override + public String 
tableFactoryName() { + assert(isOwningHandle()); + return tableFactoryName(nativeHandle_); + } + + @Override + public Options setCfPaths(final Collection<DbPath> cfPaths) { + assert (isOwningHandle()); + + final int len = cfPaths.size(); + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; + + int i = 0; + for (final DbPath dbPath : cfPaths) { + paths[i] = dbPath.path.toString(); + targetSizes[i] = dbPath.targetSize; + i++; + } + setCfPaths(nativeHandle_, paths, targetSizes); + return this; + } + + @Override + public List<DbPath> cfPaths() { + final int len = (int) cfPathsLen(nativeHandle_); + + if (len == 0) { + return Collections.emptyList(); + } + + final String[] paths = new String[len]; + final long[] targetSizes = new long[len]; + + cfPaths(nativeHandle_, paths, targetSizes); + + final List<DbPath> cfPaths = new ArrayList<>(); + for (int i = 0; i < len; i++) { + cfPaths.add(new DbPath(Paths.get(paths[i]), targetSizes[i])); + } + + return cfPaths; + } + + @Override + public Options useFixedLengthPrefixExtractor(final int n) { + assert(isOwningHandle()); + useFixedLengthPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public Options useCappedPrefixExtractor(final int n) { + assert(isOwningHandle()); + useCappedPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public CompressionType compressionType() { + return CompressionType.getCompressionType(compressionType(nativeHandle_)); + } + + @Override + public Options setCompressionPerLevel( + final List<CompressionType> compressionLevels) { + final byte[] byteCompressionTypes = new byte[ + compressionLevels.size()]; + for (int i = 0; i < compressionLevels.size(); i++) { + byteCompressionTypes[i] = compressionLevels.get(i).getValue(); + } + setCompressionPerLevel(nativeHandle_, byteCompressionTypes); + return this; + } + + @Override + public List<CompressionType> compressionPerLevel() { + final byte[] byteCompressionTypes = + compressionPerLevel(nativeHandle_); + final List<CompressionType> compressionLevels = new ArrayList<>(); + for (final byte byteCompressionType : byteCompressionTypes) { + compressionLevels.add(CompressionType.getCompressionType( + byteCompressionType)); + } + return compressionLevels; + } + + @Override + public Options setCompressionType(CompressionType compressionType) { + setCompressionType(nativeHandle_, compressionType.getValue()); + return this; + } + + + @Override + public Options setBottommostCompressionType( + final CompressionType bottommostCompressionType) { + setBottommostCompressionType(nativeHandle_, + bottommostCompressionType.getValue()); + return this; + } + + @Override + public CompressionType bottommostCompressionType() { + return CompressionType.getCompressionType( + bottommostCompressionType(nativeHandle_)); + } + + @Override + public Options setBottommostCompressionOptions( + final CompressionOptions bottommostCompressionOptions) { + setBottommostCompressionOptions(nativeHandle_, + bottommostCompressionOptions.nativeHandle_); + this.bottommostCompressionOptions_ = bottommostCompressionOptions; + return this; + } + + @Override + public CompressionOptions bottommostCompressionOptions() { + return this.bottommostCompressionOptions_; + } + + @Override + public Options setCompressionOptions( + final CompressionOptions compressionOptions) { + setCompressionOptions(nativeHandle_, compressionOptions.nativeHandle_); + this.compressionOptions_ = compressionOptions; + return this; + } + + @Override + public CompressionOptions compressionOptions() { + return this.compressionOptions_; + } + + @Override + public
CompactionStyle compactionStyle() { + return CompactionStyle.fromValue(compactionStyle(nativeHandle_)); + } + + @Override + public Options setCompactionStyle( + final CompactionStyle compactionStyle) { + setCompactionStyle(nativeHandle_, compactionStyle.getValue()); + return this; + } + + @Override + public int numLevels() { + return numLevels(nativeHandle_); + } + + @Override + public Options setNumLevels(int numLevels) { + setNumLevels(nativeHandle_, numLevels); + return this; + } + + @Override + public int levelZeroFileNumCompactionTrigger() { + return levelZeroFileNumCompactionTrigger(nativeHandle_); + } + + @Override + public Options setLevelZeroFileNumCompactionTrigger( + final int numFiles) { + setLevelZeroFileNumCompactionTrigger( + nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroSlowdownWritesTrigger() { + return levelZeroSlowdownWritesTrigger(nativeHandle_); + } + + @Override + public Options setLevelZeroSlowdownWritesTrigger( + final int numFiles) { + setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroStopWritesTrigger() { + return levelZeroStopWritesTrigger(nativeHandle_); + } + + @Override + public Options setLevelZeroStopWritesTrigger( + final int numFiles) { + setLevelZeroStopWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public long targetFileSizeBase() { + return targetFileSizeBase(nativeHandle_); + } + + @Override + public Options setTargetFileSizeBase(long targetFileSizeBase) { + setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); + return this; + } + + @Override + public int targetFileSizeMultiplier() { + return targetFileSizeMultiplier(nativeHandle_); + } + + @Override + public Options setTargetFileSizeMultiplier(int multiplier) { + setTargetFileSizeMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public Options setMaxBytesForLevelBase(final long maxBytesForLevelBase) { + setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase); + return this; + } + + @Override + public long maxBytesForLevelBase() { + return maxBytesForLevelBase(nativeHandle_); + } + + @Override + public Options setLevelCompactionDynamicLevelBytes( + final boolean enableLevelCompactionDynamicLevelBytes) { + setLevelCompactionDynamicLevelBytes(nativeHandle_, + enableLevelCompactionDynamicLevelBytes); + return this; + } + + @Override + public boolean levelCompactionDynamicLevelBytes() { + return levelCompactionDynamicLevelBytes(nativeHandle_); + } + + @Override + public double maxBytesForLevelMultiplier() { + return maxBytesForLevelMultiplier(nativeHandle_); + } + + @Override + public Options setMaxBytesForLevelMultiplier(final double multiplier) { + setMaxBytesForLevelMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public long maxCompactionBytes() { + return maxCompactionBytes(nativeHandle_); + } + + @Override + public Options setMaxCompactionBytes(final long maxCompactionBytes) { + setMaxCompactionBytes(nativeHandle_, maxCompactionBytes); + return this; + } + + @Override + public long arenaBlockSize() { + return arenaBlockSize(nativeHandle_); + } + + @Override + public Options setArenaBlockSize(final long arenaBlockSize) { + setArenaBlockSize(nativeHandle_, arenaBlockSize); + return this; + } + + @Override + public boolean disableAutoCompactions() { + return disableAutoCompactions(nativeHandle_); + } + + @Override + public Options setDisableAutoCompactions( + final boolean disableAutoCompactions) { + 
setDisableAutoCompactions(nativeHandle_, disableAutoCompactions); + return this; + } + + @Override + public long maxSequentialSkipInIterations() { + return maxSequentialSkipInIterations(nativeHandle_); + } + + @Override + public Options setMaxSequentialSkipInIterations( + final long maxSequentialSkipInIterations) { + setMaxSequentialSkipInIterations(nativeHandle_, + maxSequentialSkipInIterations); + return this; + } + + @Override + public boolean inplaceUpdateSupport() { + return inplaceUpdateSupport(nativeHandle_); + } + + @Override + public Options setInplaceUpdateSupport( + final boolean inplaceUpdateSupport) { + setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport); + return this; + } + + @Override + public long inplaceUpdateNumLocks() { + return inplaceUpdateNumLocks(nativeHandle_); + } + + @Override + public Options setInplaceUpdateNumLocks( + final long inplaceUpdateNumLocks) { + setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks); + return this; + } + + @Override + public double memtablePrefixBloomSizeRatio() { + return memtablePrefixBloomSizeRatio(nativeHandle_); + } + + @Override + public Options setMemtablePrefixBloomSizeRatio(final double memtablePrefixBloomSizeRatio) { + setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio); + return this; + } + + @Override + public double experimentalMempurgeThreshold() { + return experimentalMempurgeThreshold(nativeHandle_); + } + + @Override + public Options setExperimentalMempurgeThreshold(final double experimentalMempurgeThreshold) { + setExperimentalMempurgeThreshold(nativeHandle_, experimentalMempurgeThreshold); + return this; + } + + @Override + public boolean memtableWholeKeyFiltering() { + return memtableWholeKeyFiltering(nativeHandle_); + } + + @Override + public Options setMemtableWholeKeyFiltering(final boolean memtableWholeKeyFiltering) { + setMemtableWholeKeyFiltering(nativeHandle_, memtableWholeKeyFiltering); + return this; + } + + @Override + public int bloomLocality() { + return bloomLocality(nativeHandle_); + } + + @Override + public Options setBloomLocality(final int bloomLocality) { + setBloomLocality(nativeHandle_, bloomLocality); + return this; + } + + @Override + public long maxSuccessiveMerges() { + return maxSuccessiveMerges(nativeHandle_); + } + + @Override + public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) { + setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges); + return this; + } + + @Override + public int minWriteBufferNumberToMerge() { + return minWriteBufferNumberToMerge(nativeHandle_); + } + + @Override + public Options setMinWriteBufferNumberToMerge( + final int minWriteBufferNumberToMerge) { + setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge); + return this; + } + + @Override + public Options setOptimizeFiltersForHits( + final boolean optimizeFiltersForHits) { + setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits); + return this; + } + + @Override + public boolean optimizeFiltersForHits() { + return optimizeFiltersForHits(nativeHandle_); + } + + @Override + public Options + setMemtableHugePageSize( + long memtableHugePageSize) { + setMemtableHugePageSize(nativeHandle_, + memtableHugePageSize); + return this; + } + + @Override + public long memtableHugePageSize() { + return memtableHugePageSize(nativeHandle_); + } + + @Override + public Options setSoftPendingCompactionBytesLimit(long softPendingCompactionBytesLimit) { + setSoftPendingCompactionBytesLimit(nativeHandle_, + softPendingCompactionBytesLimit); + 
return this; + } + + @Override + public long softPendingCompactionBytesLimit() { + return softPendingCompactionBytesLimit(nativeHandle_); + } + + @Override + public Options setHardPendingCompactionBytesLimit(long hardPendingCompactionBytesLimit) { + setHardPendingCompactionBytesLimit(nativeHandle_, hardPendingCompactionBytesLimit); + return this; + } + + @Override + public long hardPendingCompactionBytesLimit() { + return hardPendingCompactionBytesLimit(nativeHandle_); + } + + @Override + public Options setLevel0FileNumCompactionTrigger(int level0FileNumCompactionTrigger) { + setLevel0FileNumCompactionTrigger(nativeHandle_, level0FileNumCompactionTrigger); + return this; + } + + @Override + public int level0FileNumCompactionTrigger() { + return level0FileNumCompactionTrigger(nativeHandle_); + } + + @Override + public Options setLevel0SlowdownWritesTrigger(int level0SlowdownWritesTrigger) { + setLevel0SlowdownWritesTrigger(nativeHandle_, level0SlowdownWritesTrigger); + return this; + } + + @Override + public int level0SlowdownWritesTrigger() { + return level0SlowdownWritesTrigger(nativeHandle_); + } + + @Override + public Options setLevel0StopWritesTrigger(int level0StopWritesTrigger) { + setLevel0StopWritesTrigger(nativeHandle_, level0StopWritesTrigger); + return this; + } + + @Override + public int level0StopWritesTrigger() { + return level0StopWritesTrigger(nativeHandle_); + } + + @Override + public Options setMaxBytesForLevelMultiplierAdditional(int[] maxBytesForLevelMultiplierAdditional) { + setMaxBytesForLevelMultiplierAdditional(nativeHandle_, maxBytesForLevelMultiplierAdditional); + return this; + } + + @Override + public int[] maxBytesForLevelMultiplierAdditional() { + return maxBytesForLevelMultiplierAdditional(nativeHandle_); + } + + @Override + public Options setParanoidFileChecks(boolean paranoidFileChecks) { + setParanoidFileChecks(nativeHandle_, paranoidFileChecks); + return this; + } + + @Override + public boolean paranoidFileChecks() { + return paranoidFileChecks(nativeHandle_); + } + + @Override + public Options setMaxWriteBufferNumberToMaintain( + final int maxWriteBufferNumberToMaintain) { + setMaxWriteBufferNumberToMaintain( + nativeHandle_, maxWriteBufferNumberToMaintain); + return this; + } + + @Override + public int maxWriteBufferNumberToMaintain() { + return maxWriteBufferNumberToMaintain(nativeHandle_); + } + + @Override + public Options setCompactionPriority( + final CompactionPriority compactionPriority) { + setCompactionPriority(nativeHandle_, compactionPriority.getValue()); + return this; + } + + @Override + public CompactionPriority compactionPriority() { + return CompactionPriority.getCompactionPriority( + compactionPriority(nativeHandle_)); + } + + @Override + public Options setReportBgIoStats(final boolean reportBgIoStats) { + setReportBgIoStats(nativeHandle_, reportBgIoStats); + return this; + } + + @Override + public boolean reportBgIoStats() { + return reportBgIoStats(nativeHandle_); + } + + @Override + public Options setTtl(final long ttl) { + setTtl(nativeHandle_, ttl); + return this; + } + + @Override + public long ttl() { + return ttl(nativeHandle_); + } + + @Override + public Options setPeriodicCompactionSeconds(final long periodicCompactionSeconds) { + setPeriodicCompactionSeconds(nativeHandle_, periodicCompactionSeconds); + return this; + } + + @Override + public long periodicCompactionSeconds() { + return periodicCompactionSeconds(nativeHandle_); + } + + @Override + public Options setCompactionOptionsUniversal( + final CompactionOptionsUniversal 
compactionOptionsUniversal) { + setCompactionOptionsUniversal(nativeHandle_, + compactionOptionsUniversal.nativeHandle_); + this.compactionOptionsUniversal_ = compactionOptionsUniversal; + return this; + } + + @Override + public CompactionOptionsUniversal compactionOptionsUniversal() { + return this.compactionOptionsUniversal_; + } + + @Override + public Options setCompactionOptionsFIFO(final CompactionOptionsFIFO compactionOptionsFIFO) { + setCompactionOptionsFIFO(nativeHandle_, + compactionOptionsFIFO.nativeHandle_); + this.compactionOptionsFIFO_ = compactionOptionsFIFO; + return this; + } + + @Override + public CompactionOptionsFIFO compactionOptionsFIFO() { + return this.compactionOptionsFIFO_; + } + + @Override + public Options setForceConsistencyChecks(final boolean forceConsistencyChecks) { + setForceConsistencyChecks(nativeHandle_, forceConsistencyChecks); + return this; + } + + @Override + public boolean forceConsistencyChecks() { + return forceConsistencyChecks(nativeHandle_); + } + + @Override + public Options setAtomicFlush(final boolean atomicFlush) { + setAtomicFlush(nativeHandle_, atomicFlush); + return this; + } + + @Override + public boolean atomicFlush() { + return atomicFlush(nativeHandle_); + } + + @Override + public Options setAvoidUnnecessaryBlockingIO(boolean avoidUnnecessaryBlockingIO) { + setAvoidUnnecessaryBlockingIO(nativeHandle_, avoidUnnecessaryBlockingIO); + return this; + } + + @Override + public boolean avoidUnnecessaryBlockingIO() { + assert (isOwningHandle()); + return avoidUnnecessaryBlockingIO(nativeHandle_); + } + + @Override + public Options setPersistStatsToDisk(boolean persistStatsToDisk) { + setPersistStatsToDisk(nativeHandle_, persistStatsToDisk); + return this; + } + + @Override + public boolean persistStatsToDisk() { + assert (isOwningHandle()); + return persistStatsToDisk(nativeHandle_); + } + + @Override + public Options setWriteDbidToManifest(boolean writeDbidToManifest) { + setWriteDbidToManifest(nativeHandle_, writeDbidToManifest); + return this; + } + + @Override + public boolean writeDbidToManifest() { + assert (isOwningHandle()); + return writeDbidToManifest(nativeHandle_); + } + + @Override + public Options setLogReadaheadSize(long logReadaheadSize) { + setLogReadaheadSize(nativeHandle_, logReadaheadSize); + return this; + } + + @Override + public long logReadaheadSize() { + assert (isOwningHandle()); + return logReadaheadSize(nativeHandle_); + } + + @Override + public Options setBestEffortsRecovery(boolean bestEffortsRecovery) { + setBestEffortsRecovery(nativeHandle_, bestEffortsRecovery); + return this; + } + + @Override + public boolean bestEffortsRecovery() { + assert (isOwningHandle()); + return bestEffortsRecovery(nativeHandle_); + } + + @Override + public Options setMaxBgErrorResumeCount(int maxBgerrorResumeCount) { + setMaxBgErrorResumeCount(nativeHandle_, maxBgerrorResumeCount); + return this; + } + + @Override + public int maxBgerrorResumeCount() { + assert (isOwningHandle()); + return maxBgerrorResumeCount(nativeHandle_); + } + + @Override + public Options setBgerrorResumeRetryInterval(long bgerrorResumeRetryInterval) { + setBgerrorResumeRetryInterval(nativeHandle_, bgerrorResumeRetryInterval); + return this; + } + + @Override + public long bgerrorResumeRetryInterval() { + assert (isOwningHandle()); + return bgerrorResumeRetryInterval(nativeHandle_); + } + + @Override + public Options setSstPartitionerFactory(SstPartitionerFactory sstPartitionerFactory) { + setSstPartitionerFactory(nativeHandle_, 
sstPartitionerFactory.nativeHandle_); + this.sstPartitionerFactory_ = sstPartitionerFactory; + return this; + } + + @Override + public SstPartitionerFactory sstPartitionerFactory() { + return sstPartitionerFactory_; + } + + @Override + public Options setCompactionThreadLimiter(final ConcurrentTaskLimiter compactionThreadLimiter) { + setCompactionThreadLimiter(nativeHandle_, compactionThreadLimiter.nativeHandle_); + this.compactionThreadLimiter_ = compactionThreadLimiter; + return this; + } + + @Override + public ConcurrentTaskLimiter compactionThreadLimiter() { + assert (isOwningHandle()); + return this.compactionThreadLimiter_; + } + + // + // BEGIN options for blobs (integrated BlobDB) + // + + @Override + public Options setEnableBlobFiles(final boolean enableBlobFiles) { + setEnableBlobFiles(nativeHandle_, enableBlobFiles); + return this; + } + + @Override + public boolean enableBlobFiles() { + return enableBlobFiles(nativeHandle_); + } + + @Override + public Options setMinBlobSize(final long minBlobSize) { + setMinBlobSize(nativeHandle_, minBlobSize); + return this; + } + + @Override + public long minBlobSize() { + return minBlobSize(nativeHandle_); + } + + @Override + public Options setBlobFileSize(final long blobFileSize) { + setBlobFileSize(nativeHandle_, blobFileSize); + return this; + } + + @Override + public long blobFileSize() { + return blobFileSize(nativeHandle_); + } + + @Override + public Options setBlobCompressionType(CompressionType compressionType) { + setBlobCompressionType(nativeHandle_, compressionType.getValue()); + return this; + } + + @Override + public CompressionType blobCompressionType() { + return CompressionType.values()[blobCompressionType(nativeHandle_)]; + } + + @Override + public Options setEnableBlobGarbageCollection(final boolean enableBlobGarbageCollection) { + setEnableBlobGarbageCollection(nativeHandle_, enableBlobGarbageCollection); + return this; + } + + @Override + public boolean enableBlobGarbageCollection() { + return enableBlobGarbageCollection(nativeHandle_); + } + + @Override + public Options setBlobGarbageCollectionAgeCutoff(final double blobGarbageCollectionAgeCutoff) { + setBlobGarbageCollectionAgeCutoff(nativeHandle_, blobGarbageCollectionAgeCutoff); + return this; + } + + @Override + public double blobGarbageCollectionAgeCutoff() { + return blobGarbageCollectionAgeCutoff(nativeHandle_); + } + + @Override + public Options setBlobGarbageCollectionForceThreshold( + final double blobGarbageCollectionForceThreshold) { + setBlobGarbageCollectionForceThreshold(nativeHandle_, blobGarbageCollectionForceThreshold); + return this; + } + + @Override + public double blobGarbageCollectionForceThreshold() { + return blobGarbageCollectionForceThreshold(nativeHandle_); + } + + @Override + public Options setBlobCompactionReadaheadSize(final long blobCompactionReadaheadSize) { + setBlobCompactionReadaheadSize(nativeHandle_, blobCompactionReadaheadSize); + return this; + } + + @Override + public long blobCompactionReadaheadSize() { + return blobCompactionReadaheadSize(nativeHandle_); + } + + @Override + public Options setBlobFileStartingLevel(final int blobFileStartingLevel) { + setBlobFileStartingLevel(nativeHandle_, blobFileStartingLevel); + return this; + } + + @Override + public int blobFileStartingLevel() { + return blobFileStartingLevel(nativeHandle_); + } + + @Override + public Options setPrepopulateBlobCache(final PrepopulateBlobCache prepopulateBlobCache) { + setPrepopulateBlobCache(nativeHandle_, prepopulateBlobCache.getValue()); + return this; + } 
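The blob-related setters above configure RocksDB's integrated BlobDB. A minimal, illustrative usage sketch (the values are arbitrary and this snippet is not part of the upstream file):

    try (final Options options = new Options()) {
      options.setEnableBlobFiles(true) // store large values in separate blob files
          .setMinBlobSize(4L * 1024) // values of 4 KiB and larger go to blob files
          .setEnableBlobGarbageCollection(true)
          .setBlobGarbageCollectionAgeCutoff(0.25); // GC targets the oldest 25% of blob files
    }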
+ + @Override + public PrepopulateBlobCache prepopulateBlobCache() { + return PrepopulateBlobCache.getPrepopulateBlobCache(prepopulateBlobCache(nativeHandle_)); + } + + // + // END options for blobs (integrated BlobDB) + // + + private native static long newOptions(); + private native static long newOptions(long dbOptHandle, + long cfOptHandle); + private native static long copyOptions(long handle); + @Override protected final native void disposeInternal(final long handle); + private native void setEnv(long optHandle, long envHandle); + private native void prepareForBulkLoad(long handle); + + // DB native handles + private native void setIncreaseParallelism(long handle, int totalThreads); + private native void setCreateIfMissing(long handle, boolean flag); + private native boolean createIfMissing(long handle); + private native void setCreateMissingColumnFamilies( + long handle, boolean flag); + private native boolean createMissingColumnFamilies(long handle); + private native void setErrorIfExists(long handle, boolean errorIfExists); + private native boolean errorIfExists(long handle); + private native void setParanoidChecks( + long handle, boolean paranoidChecks); + private native boolean paranoidChecks(long handle); + private native void setRateLimiter(long handle, + long rateLimiterHandle); + private native void setSstFileManager(final long handle, + final long sstFileManagerHandle); + private native void setLogger(long handle, + long loggerHandle); + private native void setInfoLogLevel(long handle, byte logLevel); + private native byte infoLogLevel(long handle); + private native void setMaxOpenFiles(long handle, int maxOpenFiles); + private native int maxOpenFiles(long handle); + private native void setMaxTotalWalSize(long handle, + long maxTotalWalSize); + private native void setMaxFileOpeningThreads(final long handle, + final int maxFileOpeningThreads); + private native int maxFileOpeningThreads(final long handle); + private native long maxTotalWalSize(long handle); + private native void setStatistics(final long handle, final long statisticsHandle); + private native long statistics(final long handle); + private native boolean useFsync(long handle); + private native void setUseFsync(long handle, boolean useFsync); + private native void setDbPaths(final long handle, final String[] paths, + final long[] targetSizes); + private native long dbPathsLen(final long handle); + private native void dbPaths(final long handle, final String[] paths, + final long[] targetSizes); + private native void setDbLogDir(long handle, String dbLogDir); + private native String dbLogDir(long handle); + private native void setWalDir(long handle, String walDir); + private native String walDir(long handle); + private native void setDeleteObsoleteFilesPeriodMicros( + long handle, long micros); + private native long deleteObsoleteFilesPeriodMicros(long handle); + private native void setMaxBackgroundCompactions( + long handle, int maxBackgroundCompactions); + private native int maxBackgroundCompactions(long handle); + private native void setMaxSubcompactions(long handle, int maxSubcompactions); + private native int maxSubcompactions(long handle); + private native void setMaxBackgroundFlushes( + long handle, int maxBackgroundFlushes); + private native int maxBackgroundFlushes(long handle); + private native void setMaxBackgroundJobs(long handle, int maxMaxBackgroundJobs); + private native int maxBackgroundJobs(long handle); + private native void setMaxLogFileSize(long handle, long maxLogFileSize) + throws 
IllegalArgumentException; + private native long maxLogFileSize(long handle); + private native void setLogFileTimeToRoll( + long handle, long logFileTimeToRoll) throws IllegalArgumentException; + private native long logFileTimeToRoll(long handle); + private native void setKeepLogFileNum(long handle, long keepLogFileNum) + throws IllegalArgumentException; + private native long keepLogFileNum(long handle); + private native void setRecycleLogFileNum(long handle, long recycleLogFileNum); + private native long recycleLogFileNum(long handle); + private native void setMaxManifestFileSize( + long handle, long maxManifestFileSize); + private native long maxManifestFileSize(long handle); + private native void setMaxTableFilesSizeFIFO( + long handle, long maxTableFilesSize); + private native long maxTableFilesSizeFIFO(long handle); + private native void setTableCacheNumshardbits( + long handle, int tableCacheNumshardbits); + private native int tableCacheNumshardbits(long handle); + private native void setWalTtlSeconds(long handle, long walTtlSeconds); + private native long walTtlSeconds(long handle); + private native void setWalSizeLimitMB(long handle, long sizeLimitMB); + private native long walSizeLimitMB(long handle); + private static native void setMaxWriteBatchGroupSizeBytes( + final long handle, final long maxWriteBatchGroupSizeBytes); + private static native long maxWriteBatchGroupSizeBytes(final long handle); + private native void setManifestPreallocationSize( + long handle, long size) throws IllegalArgumentException; + private native long manifestPreallocationSize(long handle); + private native void setUseDirectReads(long handle, boolean useDirectReads); + private native boolean useDirectReads(long handle); + private native void setUseDirectIoForFlushAndCompaction( + long handle, boolean useDirectIoForFlushAndCompaction); + private native boolean useDirectIoForFlushAndCompaction(long handle); + private native void setAllowFAllocate(final long handle, + final boolean allowFAllocate); + private native boolean allowFAllocate(final long handle); + private native void setAllowMmapReads( + long handle, boolean allowMmapReads); + private native boolean allowMmapReads(long handle); + private native void setAllowMmapWrites( + long handle, boolean allowMmapWrites); + private native boolean allowMmapWrites(long handle); + private native void setIsFdCloseOnExec( + long handle, boolean isFdCloseOnExec); + private native boolean isFdCloseOnExec(long handle); + private native void setStatsDumpPeriodSec( + long handle, int statsDumpPeriodSec); + private native int statsDumpPeriodSec(long handle); + private native void setStatsPersistPeriodSec( + final long handle, final int statsPersistPeriodSec); + private native int statsPersistPeriodSec( + final long handle); + private native void setStatsHistoryBufferSize( + final long handle, final long statsHistoryBufferSize); + private native long statsHistoryBufferSize( + final long handle); + private native void setAdviseRandomOnOpen( + long handle, boolean adviseRandomOnOpen); + private native boolean adviseRandomOnOpen(long handle); + private native void setDbWriteBufferSize(final long handle, + final long dbWriteBufferSize); + private native void setWriteBufferManager(final long handle, + final long writeBufferManagerHandle); + private native long dbWriteBufferSize(final long handle); + private native void setAccessHintOnCompactionStart(final long handle, + final byte accessHintOnCompactionStart); + private native byte accessHintOnCompactionStart(final long 
handle); + private native void setCompactionReadaheadSize(final long handle, + final long compactionReadaheadSize); + private native long compactionReadaheadSize(final long handle); + private native void setRandomAccessMaxBufferSize(final long handle, + final long randomAccessMaxBufferSize); + private native long randomAccessMaxBufferSize(final long handle); + private native void setWritableFileMaxBufferSize(final long handle, + final long writableFileMaxBufferSize); + private native long writableFileMaxBufferSize(final long handle); + private native void setUseAdaptiveMutex( + long handle, boolean useAdaptiveMutex); + private native boolean useAdaptiveMutex(long handle); + private native void setBytesPerSync( + long handle, long bytesPerSync); + private native long bytesPerSync(long handle); + private native void setWalBytesPerSync(long handle, long walBytesPerSync); + private native long walBytesPerSync(long handle); + private native void setStrictBytesPerSync( + final long handle, final boolean strictBytesPerSync); + private native boolean strictBytesPerSync( + final long handle); + private static native void setEventListeners( + final long handle, final long[] eventListenerHandles); + private static native AbstractEventListener[] eventListeners(final long handle); + private native void setEnableThreadTracking(long handle, + boolean enableThreadTracking); + private native boolean enableThreadTracking(long handle); + private native void setDelayedWriteRate(long handle, long delayedWriteRate); + private native long delayedWriteRate(long handle); + private native void setEnablePipelinedWrite(final long handle, + final boolean pipelinedWrite); + private native boolean enablePipelinedWrite(final long handle); + private native void setUnorderedWrite(final long handle, + final boolean unorderedWrite); + private native boolean unorderedWrite(final long handle); + private native void setAllowConcurrentMemtableWrite(long handle, + boolean allowConcurrentMemtableWrite); + private native boolean allowConcurrentMemtableWrite(long handle); + private native void setEnableWriteThreadAdaptiveYield(long handle, + boolean enableWriteThreadAdaptiveYield); + private native boolean enableWriteThreadAdaptiveYield(long handle); + private native void setWriteThreadMaxYieldUsec(long handle, + long writeThreadMaxYieldUsec); + private native long writeThreadMaxYieldUsec(long handle); + private native void setWriteThreadSlowYieldUsec(long handle, + long writeThreadSlowYieldUsec); + private native long writeThreadSlowYieldUsec(long handle); + private native void setSkipStatsUpdateOnDbOpen(final long handle, + final boolean skipStatsUpdateOnDbOpen); + private native boolean skipStatsUpdateOnDbOpen(final long handle); + private static native void setSkipCheckingSstFileSizesOnDbOpen( + final long handle, final boolean skipChecking); + private static native boolean skipCheckingSstFileSizesOnDbOpen(final long handle); + private native void setWalRecoveryMode(final long handle, + final byte walRecoveryMode); + private native byte walRecoveryMode(final long handle); + private native void setAllow2pc(final long handle, + final boolean allow2pc); + private native boolean allow2pc(final long handle); + private native void setRowCache(final long handle, + final long rowCacheHandle); + private native void setWalFilter(final long handle, + final long walFilterHandle); + private native void setFailIfOptionsFileError(final long handle, + final boolean failIfOptionsFileError); + private native boolean failIfOptionsFileError(final 
long handle); + private native void setDumpMallocStats(final long handle, + final boolean dumpMallocStats); + private native boolean dumpMallocStats(final long handle); + private native void setAvoidFlushDuringRecovery(final long handle, + final boolean avoidFlushDuringRecovery); + private native boolean avoidFlushDuringRecovery(final long handle); + private native void setAvoidFlushDuringShutdown(final long handle, + final boolean avoidFlushDuringShutdown); + private native boolean avoidFlushDuringShutdown(final long handle); + private native void setAllowIngestBehind(final long handle, + final boolean allowIngestBehind); + private native boolean allowIngestBehind(final long handle); + private native void setTwoWriteQueues(final long handle, + final boolean twoWriteQueues); + private native boolean twoWriteQueues(final long handle); + private native void setManualWalFlush(final long handle, + final boolean manualWalFlush); + private native boolean manualWalFlush(final long handle); + + + // CF native handles + private static native void oldDefaults( + final long handle, final int majorVersion, final int minorVersion); + private native void optimizeForSmallDb(final long handle); + private static native void optimizeForSmallDb(final long handle, final long cacheHandle); + private native void optimizeForPointLookup(long handle, + long blockCacheSizeMb); + private native void optimizeLevelStyleCompaction(long handle, + long memtableMemoryBudget); + private native void optimizeUniversalStyleCompaction(long handle, + long memtableMemoryBudget); + private native void setComparatorHandle(long handle, int builtinComparator); + private native void setComparatorHandle(long optHandle, + long comparatorHandle, byte comparatorType); + private native void setMergeOperatorName( + long handle, String name); + private native void setMergeOperator( + long handle, long mergeOperatorHandle); + private native void setCompactionFilterHandle( + long handle, long compactionFilterHandle); + private native void setCompactionFilterFactoryHandle( + long handle, long compactionFilterFactoryHandle); + private native void setWriteBufferSize(long handle, long writeBufferSize) + throws IllegalArgumentException; + private native long writeBufferSize(long handle); + private native void setMaxWriteBufferNumber( + long handle, int maxWriteBufferNumber); + private native int maxWriteBufferNumber(long handle); + private native void setMinWriteBufferNumberToMerge( + long handle, int minWriteBufferNumberToMerge); + private native int minWriteBufferNumberToMerge(long handle); + private native void setCompressionType(long handle, byte compressionType); + private native byte compressionType(long handle); + private native void setCompressionPerLevel(long handle, + byte[] compressionLevels); + private native byte[] compressionPerLevel(long handle); + private native void setBottommostCompressionType(long handle, + byte bottommostCompressionType); + private native byte bottommostCompressionType(long handle); + private native void setBottommostCompressionOptions(final long handle, + final long bottommostCompressionOptionsHandle); + private native void setCompressionOptions(long handle, + long compressionOptionsHandle); + private native void useFixedLengthPrefixExtractor( + long handle, int prefixLength); + private native void useCappedPrefixExtractor( + long handle, int prefixLength); + private native void setNumLevels( + long handle, int numLevels); + private native int numLevels(long handle); + private native void 
setLevelZeroFileNumCompactionTrigger( + long handle, int numFiles); + private native int levelZeroFileNumCompactionTrigger(long handle); + private native void setLevelZeroSlowdownWritesTrigger( + long handle, int numFiles); + private native int levelZeroSlowdownWritesTrigger(long handle); + private native void setLevelZeroStopWritesTrigger( + long handle, int numFiles); + private native int levelZeroStopWritesTrigger(long handle); + private native void setTargetFileSizeBase( + long handle, long targetFileSizeBase); + private native long targetFileSizeBase(long handle); + private native void setTargetFileSizeMultiplier( + long handle, int multiplier); + private native int targetFileSizeMultiplier(long handle); + private native void setMaxBytesForLevelBase( + long handle, long maxBytesForLevelBase); + private native long maxBytesForLevelBase(long handle); + private native void setLevelCompactionDynamicLevelBytes( + long handle, boolean enableLevelCompactionDynamicLevelBytes); + private native boolean levelCompactionDynamicLevelBytes( + long handle); + private native void setMaxBytesForLevelMultiplier(long handle, double multiplier); + private native double maxBytesForLevelMultiplier(long handle); + private native void setMaxCompactionBytes(long handle, long maxCompactionBytes); + private native long maxCompactionBytes(long handle); + private native void setArenaBlockSize( + long handle, long arenaBlockSize) throws IllegalArgumentException; + private native long arenaBlockSize(long handle); + private native void setDisableAutoCompactions( + long handle, boolean disableAutoCompactions); + private native boolean disableAutoCompactions(long handle); + private native void setCompactionStyle(long handle, byte compactionStyle); + private native byte compactionStyle(long handle); + private native void setMaxSequentialSkipInIterations( + long handle, long maxSequentialSkipInIterations); + private native long maxSequentialSkipInIterations(long handle); + private native void setMemTableFactory(long handle, long factoryHandle); + private native String memTableFactoryName(long handle); + private native void setTableFactory(long handle, long factoryHandle); + private native String tableFactoryName(long handle); + private static native void setCfPaths( + final long handle, final String[] paths, final long[] targetSizes); + private static native long cfPathsLen(final long handle); + private static native void cfPaths( + final long handle, final String[] paths, final long[] targetSizes); + private native void setInplaceUpdateSupport( + long handle, boolean inplaceUpdateSupport); + private native boolean inplaceUpdateSupport(long handle); + private native void setInplaceUpdateNumLocks( + long handle, long inplaceUpdateNumLocks) + throws IllegalArgumentException; + private native long inplaceUpdateNumLocks(long handle); + private native void setMemtablePrefixBloomSizeRatio( + long handle, double memtablePrefixBloomSizeRatio); + private native double memtablePrefixBloomSizeRatio(long handle); + private native void setExperimentalMempurgeThreshold( + long handle, double experimentalMempurgeThreshold); + private native double experimentalMempurgeThreshold(long handle); + private native void setMemtableWholeKeyFiltering(long handle, boolean memtableWholeKeyFiltering); + private native boolean memtableWholeKeyFiltering(long handle); + private native void setBloomLocality( + long handle, int bloomLocality); + private native int bloomLocality(long handle); + private native void setMaxSuccessiveMerges( + long handle, 
long maxSuccessiveMerges) + throws IllegalArgumentException; + private native long maxSuccessiveMerges(long handle); + private native void setOptimizeFiltersForHits(long handle, + boolean optimizeFiltersForHits); + private native boolean optimizeFiltersForHits(long handle); + private native void setMemtableHugePageSize(long handle, + long memtableHugePageSize); + private native long memtableHugePageSize(long handle); + private native void setSoftPendingCompactionBytesLimit(long handle, + long softPendingCompactionBytesLimit); + private native long softPendingCompactionBytesLimit(long handle); + private native void setHardPendingCompactionBytesLimit(long handle, + long hardPendingCompactionBytesLimit); + private native long hardPendingCompactionBytesLimit(long handle); + private native void setLevel0FileNumCompactionTrigger(long handle, + int level0FileNumCompactionTrigger); + private native int level0FileNumCompactionTrigger(long handle); + private native void setLevel0SlowdownWritesTrigger(long handle, + int level0SlowdownWritesTrigger); + private native int level0SlowdownWritesTrigger(long handle); + private native void setLevel0StopWritesTrigger(long handle, + int level0StopWritesTrigger); + private native int level0StopWritesTrigger(long handle); + private native void setMaxBytesForLevelMultiplierAdditional(long handle, + int[] maxBytesForLevelMultiplierAdditional); + private native int[] maxBytesForLevelMultiplierAdditional(long handle); + private native void setParanoidFileChecks(long handle, + boolean paranoidFileChecks); + private native boolean paranoidFileChecks(long handle); + private native void setMaxWriteBufferNumberToMaintain(final long handle, + final int maxWriteBufferNumberToMaintain); + private native int maxWriteBufferNumberToMaintain(final long handle); + private native void setCompactionPriority(final long handle, + final byte compactionPriority); + private native byte compactionPriority(final long handle); + private native void setReportBgIoStats(final long handle, + final boolean reportBgIoStats); + private native boolean reportBgIoStats(final long handle); + private native void setTtl(final long handle, final long ttl); + private native long ttl(final long handle); + private native void setPeriodicCompactionSeconds( + final long handle, final long periodicCompactionSeconds); + private native long periodicCompactionSeconds(final long handle); + private native void setCompactionOptionsUniversal(final long handle, + final long compactionOptionsUniversalHandle); + private native void setCompactionOptionsFIFO(final long handle, + final long compactionOptionsFIFOHandle); + private native void setForceConsistencyChecks(final long handle, + final boolean forceConsistencyChecks); + private native boolean forceConsistencyChecks(final long handle); + private native void setAtomicFlush(final long handle, + final boolean atomicFlush); + private native boolean atomicFlush(final long handle); + private native void setSstPartitionerFactory(long nativeHandle_, long newFactoryHandle); + private static native void setCompactionThreadLimiter( + final long nativeHandle_, final long newLimiterHandle); + private static native void setAvoidUnnecessaryBlockingIO( + final long handle, final boolean avoidBlockingIO); + private static native boolean avoidUnnecessaryBlockingIO(final long handle); + private static native void setPersistStatsToDisk( + final long handle, final boolean persistStatsToDisk); + private static native boolean persistStatsToDisk(final long handle); + private static 
native void setWriteDbidToManifest( + final long handle, final boolean writeDbidToManifest); + private static native boolean writeDbidToManifest(final long handle); + private static native void setLogReadaheadSize(final long handle, final long logReadaheadSize); + private static native long logReadaheadSize(final long handle); + private static native void setBestEffortsRecovery( + final long handle, final boolean bestEffortsRecovery); + private static native boolean bestEffortsRecovery(final long handle); + private static native void setMaxBgErrorResumeCount( + final long handle, final int maxBgerrorRecumeCount); + private static native int maxBgerrorResumeCount(final long handle); + private static native void setBgerrorResumeRetryInterval( + final long handle, final long bgerrorResumeRetryInterval); + private static native long bgerrorResumeRetryInterval(final long handle); + + private native void setEnableBlobFiles(final long nativeHandle_, final boolean enableBlobFiles); + private native boolean enableBlobFiles(final long nativeHandle_); + private native void setMinBlobSize(final long nativeHandle_, final long minBlobSize); + private native long minBlobSize(final long nativeHandle_); + private native void setBlobFileSize(final long nativeHandle_, final long blobFileSize); + private native long blobFileSize(final long nativeHandle_); + private native void setBlobCompressionType(final long nativeHandle_, final byte compressionType); + private native byte blobCompressionType(final long nativeHandle_); + private native void setEnableBlobGarbageCollection( + final long nativeHandle_, final boolean enableBlobGarbageCollection); + private native boolean enableBlobGarbageCollection(final long nativeHandle_); + private native void setBlobGarbageCollectionAgeCutoff( + final long nativeHandle_, final double blobGarbageCollectionAgeCutoff); + private native double blobGarbageCollectionAgeCutoff(final long nativeHandle_); + private native void setBlobGarbageCollectionForceThreshold( + final long nativeHandle_, final double blobGarbageCollectionForceThreshold); + private native double blobGarbageCollectionForceThreshold(final long nativeHandle_); + private native void setBlobCompactionReadaheadSize( + final long nativeHandle_, final long blobCompactionReadaheadSize); + private native long blobCompactionReadaheadSize(final long nativeHandle_); + private native void setBlobFileStartingLevel( + final long nativeHandle_, final int blobFileStartingLevel); + private native int blobFileStartingLevel(final long nativeHandle_); + private native void setPrepopulateBlobCache( + final long nativeHandle_, final byte prepopulateBlobCache); + private native byte prepopulateBlobCache(final long nativeHandle_); + + // instance variables + // NOTE: If you add new member variables, please update the copy constructor above! 
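Every accessor above follows the same JNI pattern: the public Java method delegates to a private native function, passing the address of the owned C++ object via nativeHandle_. A stripped-down sketch of that pattern (hypothetical names, not upstream code):

    public class NativeHandleSketch {
      private long nativeHandle_; // address of the wrapped C++ object, set by a native constructor

      public NativeHandleSketch setFoo(final boolean foo) {
        setFoo(nativeHandle_, foo); // delegate to the C++ implementation via JNI
        return this; // fluent chaining, mirroring Options
      }

      private native void setFoo(long handle, boolean foo);
    }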
+ private Env env_; + private MemTableConfig memTableConfig_; + private TableFormatConfig tableFormatConfig_; + private RateLimiter rateLimiter_; + private AbstractComparator comparator_; + private AbstractCompactionFilter<? super AbstractSlice<?>> compactionFilter_; + private AbstractCompactionFilterFactory<? extends AbstractCompactionFilter<?>> + compactionFilterFactory_; + private CompactionOptionsUniversal compactionOptionsUniversal_; + private CompactionOptionsFIFO compactionOptionsFIFO_; + private CompressionOptions bottommostCompressionOptions_; + private CompressionOptions compressionOptions_; + private Cache rowCache_; + private WalFilter walFilter_; + private WriteBufferManager writeBufferManager_; + private SstPartitionerFactory sstPartitionerFactory_; + private ConcurrentTaskLimiter compactionThreadLimiter_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java b/src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java new file mode 100644 index 000000000..899996af9 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/OptionsUtil.java @@ -0,0 +1,184 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +public class OptionsUtil { + /** + * A static method to construct the DBOptions and ColumnFamilyDescriptors by + * loading the latest RocksDB options file stored in the specified rocksdb + * database. + * + * Note that all the pointer options (except table_factory, which will + * be described in more detail below) will be initialized with the default + * values. Developers can further initialize them after this function call. + * Below is an example list of pointer options which will be initialized. + * + * - env + * - memtable_factory + * - compaction_filter_factory + * - prefix_extractor + * - comparator + * - merge_operator + * - compaction_filter + * + * For table_factory, this function further supports deserializing + * BlockBasedTableFactory and its BlockBasedTableOptions except the + * pointer options of BlockBasedTableOptions (flush_block_policy_factory, + * block_cache, and block_cache_compressed), which will be initialized with + * default values. Developers can further specify these three options by + * casting the return value of TableFactory::GetOptions() to + * BlockBasedTableOptions and making necessary changes. + * + * @param dbPath the path to the RocksDB. + * @param env {@link org.rocksdb.Env} instance. + * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be + * filled and returned. + * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}'s to be + * returned. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + + public static void loadLatestOptions(String dbPath, Env env, DBOptions dbOptions, + List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException { + loadLatestOptions(dbPath, env, dbOptions, cfDescs, false); + } + + /** + * @param dbPath the path to the RocksDB. + * @param env {@link org.rocksdb.Env} instance. + * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be + * filled and returned. + * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}'s to be + * returned.
+ * @param ignoreUnknownOptions this flag can be set to true if you want to + * ignore options that are from a newer version of the db, essentially for + * forward compatibility. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static void loadLatestOptions(String dbPath, Env env, DBOptions dbOptions, + List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions) throws RocksDBException { + loadLatestOptions( + dbPath, env.nativeHandle_, dbOptions.nativeHandle_, cfDescs, ignoreUnknownOptions); + } + + /** + * Similar to LoadLatestOptions, this function constructs the DBOptions + * and ColumnFamilyDescriptors based on the specified RocksDB Options file. + * See LoadLatestOptions above. + * + * @param dbPath the path to the RocksDB. + * @param configOptions {@link org.rocksdb.ConfigOptions} instance. + * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be + * filled and returned. + * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}'s to be + * returned. + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static void loadLatestOptions(ConfigOptions configOptions, String dbPath, + DBOptions dbOptions, List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException { + loadLatestOptions(configOptions.nativeHandle_, dbPath, dbOptions.nativeHandle_, cfDescs); + } + + /** + * Similar to LoadLatestOptions, this function constructs the DBOptions + * and ColumnFamilyDescriptors based on the specified RocksDB Options file. + * See LoadLatestOptions above. + * + * @param optionsFileName the RocksDB options file path. + * @param env {@link org.rocksdb.Env} instance. + * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be + * filled and returned. + * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}'s to be + * returned. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static void loadOptionsFromFile(String optionsFileName, Env env, DBOptions dbOptions, + List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException { + loadOptionsFromFile(optionsFileName, env, dbOptions, cfDescs, false); + } + + /** + * @param optionsFileName the RocksDB options file path. + * @param env {@link org.rocksdb.Env} instance. + * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be + * filled and returned. + * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}'s to be + * returned. + * @param ignoreUnknownOptions this flag can be set to true if you want to + * ignore options that are from a newer version of the db, essentially for + * forward compatibility. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static void loadOptionsFromFile(String optionsFileName, Env env, DBOptions dbOptions, + List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions) throws RocksDBException { + loadOptionsFromFile( + optionsFileName, env.nativeHandle_, dbOptions.nativeHandle_, cfDescs, ignoreUnknownOptions); + } + + /** + * Similar to LoadLatestOptions, this function constructs the DBOptions + * and ColumnFamilyDescriptors based on the specified RocksDB Options file. + * See LoadLatestOptions above. + * + * @param optionsFileName the RocksDB options file path. + * @param configOptions {@link org.rocksdb.ConfigOptions} instance. + * @param dbOptions {@link org.rocksdb.DBOptions} instance. This will be + * filled and returned. + * @param cfDescs A list of {@link org.rocksdb.ColumnFamilyDescriptor}'s to be + * returned.
+ * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static void loadOptionsFromFile(ConfigOptions configOptions, String optionsFileName, + DBOptions dbOptions, List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException { + loadOptionsFromFile( + configOptions.nativeHandle_, optionsFileName, dbOptions.nativeHandle_, cfDescs); + } + + /** + * Returns the latest options file name under the specified RocksDB path. + * + * @param dbPath the path to the RocksDB. + * @param env {@link org.rocksdb.Env} instance. + * @return the latest options file name under the db path. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static String getLatestOptionsFileName(String dbPath, Env env) throws RocksDBException { + return getLatestOptionsFileName(dbPath, env.nativeHandle_); + } + + /** + * Private constructor. + * This class has only static methods and shouldn't be instantiated. + */ + private OptionsUtil() {} + + // native methods + private native static void loadLatestOptions(String dbPath, long envHandle, long dbOptionsHandle, + List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions) throws RocksDBException; + private native static void loadLatestOptions(long cfgHandle, String dbPath, long dbOptionsHandle, + List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException; + private native static void loadOptionsFromFile(String optionsFileName, long envHandle, + long dbOptionsHandle, List<ColumnFamilyDescriptor> cfDescs, boolean ignoreUnknownOptions) + throws RocksDBException; + private native static void loadOptionsFromFile(long cfgHandle, String optionsFileName, + long dbOptionsHandle, List<ColumnFamilyDescriptor> cfDescs) throws RocksDBException; + private native static String getLatestOptionsFileName(String dbPath, long envHandle) + throws RocksDBException; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java b/src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java new file mode 100644 index 000000000..aed565297 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/PersistentCache.java @@ -0,0 +1,26 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Persistent cache for caching IO pages on a persistent medium. The + * cache is specifically designed to serve as a persistent read cache. + */ +public class PersistentCache extends RocksObject { + + public PersistentCache(final Env env, final String path, final long size, + final Logger logger, final boolean optimizedForNvm) + throws RocksDBException { + super(newPersistentCache(env.nativeHandle_, path, size, + logger.nativeHandle_, optimizedForNvm)); + } + + private native static long newPersistentCache(final long envHandle, + final String path, final long size, final long loggerHandle, + final boolean optimizedForNvm) throws RocksDBException; + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java new file mode 100644 index 000000000..c09998167 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java @@ -0,0 +1,251 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * The config for plain table sst format. + * + *
<p>PlainTable is a RocksDB SST file format optimized for low query + * latency on pure-memory or really low-latency media.</p> + * + * <p>It also supports the prefix hash feature.</p>
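+ * + * <p>An illustrative usage sketch (not part of the upstream file), assuming an + * existing {@code Options} instance: + * <pre>{@code + * options.setTableFormatConfig(new PlainTableConfig().setKeySize(16)); + * }</pre>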
+ */ +public class PlainTableConfig extends TableFormatConfig { + public static final int VARIABLE_LENGTH = 0; + public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10; + public static final double DEFAULT_HASH_TABLE_RATIO = 0.75; + public static final int DEFAULT_INDEX_SPARSENESS = 16; + public static final int DEFAULT_HUGE_TLB_SIZE = 0; + public static final EncodingType DEFAULT_ENCODING_TYPE = + EncodingType.kPlain; + public static final boolean DEFAULT_FULL_SCAN_MODE = false; + public static final boolean DEFAULT_STORE_INDEX_IN_FILE + = false; + + public PlainTableConfig() { + keySize_ = VARIABLE_LENGTH; + bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY; + hashTableRatio_ = DEFAULT_HASH_TABLE_RATIO; + indexSparseness_ = DEFAULT_INDEX_SPARSENESS; + hugePageTlbSize_ = DEFAULT_HUGE_TLB_SIZE; + encodingType_ = DEFAULT_ENCODING_TYPE; + fullScanMode_ = DEFAULT_FULL_SCAN_MODE; + storeIndexInFile_ = DEFAULT_STORE_INDEX_IN_FILE; + } + + /** + *
<p>Set the length of the user key. If it is set to be + * VARIABLE_LENGTH, then it indicates the user keys are + * of variable length.</p> + * + * <p>Otherwise, all the keys need to have the same length + * in bytes.</p> + * + * <p>DEFAULT: VARIABLE_LENGTH</p>
+ + * @param keySize the length of the user key. + * @return the reference to the current config. + */ + public PlainTableConfig setKeySize(int keySize) { + keySize_ = keySize; + return this; + } + + /** + * @return the specified size of the user key. If VARIABLE_LENGTH, + * then it indicates variable-length key. + */ + public int keySize() { + return keySize_; + } + + /** + * Set the number of bits per key used by the internal bloom filter + * in the plain table sst format. + * + * @param bitsPerKey the number of bits per key for the bloom filter. + * @return the reference to the current config. + */ + public PlainTableConfig setBloomBitsPerKey(int bitsPerKey) { + bloomBitsPerKey_ = bitsPerKey; + return this; + } + + /** + * @return the number of bits per key used for the bloom filter. + */ + public int bloomBitsPerKey() { + return bloomBitsPerKey_; + } + + /** + * hashTableRatio is the desired utilization of the hash table used + * for prefix hashing. The ideal ratio would be the number of + * prefixes / the number of hash buckets. If this value is set to + * zero, then the hash table will not be used. + * + * @param ratio the hash table ratio. + * @return the reference to the current config. + */ + public PlainTableConfig setHashTableRatio(double ratio) { + hashTableRatio_ = ratio; + return this; + } + + /** + * @return the hash table ratio. + */ + public double hashTableRatio() { + return hashTableRatio_; + } + + /** + * Index sparseness determines the index interval for keys inside the + * same prefix. This number is equal to the maximum number of linear + * searches required after hash and binary search. If it's set to 0, + * then each key will be indexed. + * + * @param sparseness the index sparseness. + * @return the reference to the current config. + */ + public PlainTableConfig setIndexSparseness(int sparseness) { + indexSparseness_ = sparseness; + return this; + } + + /** + * @return the index sparseness. + */ + public long indexSparseness() { + return indexSparseness_; + } + + /** + *
<p>huge_page_tlb_size: if ≤0, allocate hash indexes and blooms + * from malloc; otherwise from huge page TLB.</p> + * + * <p>The user needs to reserve huge pages for it to be allocated, + * like: {@code sysctl -w vm.nr_hugepages=20}</p> + * + * <p>See the Linux doc Documentation/vm/hugetlbpage.txt</p>
+ * + * @param hugePageTlbSize huge page tlb size + * @return the reference to the current config. + */ + public PlainTableConfig setHugePageTlbSize(int hugePageTlbSize) { + this.hugePageTlbSize_ = hugePageTlbSize; + return this; + } + + /** + * Returns the value for huge page tlb size + * + * @return hugePageTlbSize + */ + public int hugePageTlbSize() { + return hugePageTlbSize_; + } + + /** + * Sets the encoding type. + * + *
<p>This setting determines how to encode + * the keys. See enum {@link EncodingType} for + * the choices.</p> + * + * <p>The value will determine how to encode keys + * when writing to a new SST file. This value will be stored + * inside the SST file which will be used when reading from + * the file, which makes it possible for users to choose + * a different encoding type when reopening a DB. Files with + * different encoding types can co-exist in the same DB and + * can be read.</p>
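+ * + * <p>An illustrative sketch (not part of the upstream file): + * <pre>{@code + * new PlainTableConfig().setEncodingType(EncodingType.kPrefix); + * }</pre>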
+ + * @param encodingType {@link org.rocksdb.EncodingType} value. + * @return the reference to the current config. + */ + public PlainTableConfig setEncodingType(EncodingType encodingType) { + this.encodingType_ = encodingType; + return this; + } + + /** + * Returns the active EncodingType. + * + * @return currently set encoding type + */ + public EncodingType encodingType() { + return encodingType_; + } + + /** + * Set full scan mode, if true the whole file will be read + * one record at a time without using the index. + * + * @param fullScanMode boolean value indicating if full + * scan mode shall be enabled. + * @return the reference to the current config. + */ + public PlainTableConfig setFullScanMode(boolean fullScanMode) { + this.fullScanMode_ = fullScanMode; + return this; + } + + /** + * Return if full scan mode is active + * @return boolean value indicating if the full scan mode is + * enabled. + */ + public boolean fullScanMode() { + return fullScanMode_; + } + + /** + *
<p>If set to true: compute plain table index and bloom + * filter during file building and store it in file. + * When reading the file, the index will be mmapped instead + * of being recomputed.</p>
+ * + * @param storeIndexInFile value indicating if index shall + * be stored in a file + * @return the reference to the current config. + */ + public PlainTableConfig setStoreIndexInFile(boolean storeIndexInFile) { + this.storeIndexInFile_ = storeIndexInFile; + return this; + } + + /** + * Return a boolean value indicating if index shall be stored + * in a file. + * + * @return currently set value for store index in file. + */ + public boolean storeIndexInFile() { + return storeIndexInFile_; + } + + @Override protected long newTableFactoryHandle() { + return newTableFactoryHandle(keySize_, bloomBitsPerKey_, + hashTableRatio_, indexSparseness_, hugePageTlbSize_, + encodingType_.getValue(), fullScanMode_, + storeIndexInFile_); + } + + private native long newTableFactoryHandle( + int keySize, int bloomBitsPerKey, + double hashTableRatio, int indexSparseness, + int hugePageTlbSize, byte encodingType, + boolean fullScanMode, boolean storeIndexInFile); + + private int keySize_; + private int bloomBitsPerKey_; + private double hashTableRatio_; + private int indexSparseness_; + private int hugePageTlbSize_; + private EncodingType encodingType_; + private boolean fullScanMode_; + private boolean storeIndexInFile_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java b/src/rocksdb/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java new file mode 100644 index 000000000..f1237aa7c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/PrepopulateBlobCache.java @@ -0,0 +1,117 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Enum PrepopulateBlobCache + * + *
<p>Prepopulate warm/hot blobs which are already in memory into the blob + * cache at the time of flush. On a flush, the blob that is in memory + * (in memtables) gets flushed to the device. If using Direct IO, + * additional IO is incurred to read this blob back into memory again, + * which is avoided by enabling this option. This further helps if the + * workload exhibits high temporal locality, where most of the reads go + * to recently written data. This also helps in the case of a remote file + * system, since it involves network traffic and higher latencies.</p>
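+ * + * <p>An illustrative sketch (not part of the upstream file), assuming an existing + * {@code Options} instance: + * <pre>{@code + * options.setPrepopulateBlobCache(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY); + * }</pre>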
+ */ +public enum PrepopulateBlobCache { + PREPOPULATE_BLOB_DISABLE((byte) 0x0, "prepopulate_blob_disable", "kDisable"), + PREPOPULATE_BLOB_FLUSH_ONLY((byte) 0x1, "prepopulate_blob_flush_only", "kFlushOnly"); + + /** + *
<p>Get the PrepopulateBlobCache enumeration value by + * passing the library name to this method.</p> + * + * <p>If the library cannot be found, the enumeration + * value {@code PREPOPULATE_BLOB_DISABLE} will be returned.</p>
+ * + * @param libraryName prepopulate blob cache library name. + * + * @return PrepopulateBlobCache instance. + */ + public static PrepopulateBlobCache getPrepopulateBlobCache(String libraryName) { + if (libraryName != null) { + for (PrepopulateBlobCache prepopulateBlobCache : PrepopulateBlobCache.values()) { + if (prepopulateBlobCache.getLibraryName() != null + && prepopulateBlobCache.getLibraryName().equals(libraryName)) { + return prepopulateBlobCache; + } + } + } + return PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE; + } + + /** + *
<p>Get the PrepopulateBlobCache enumeration value by + * passing the byte identifier to this method.</p>
+ * + * @param byteIdentifier of PrepopulateBlobCache. + * + * @return PrepopulateBlobCache instance. + * + * @throws IllegalArgumentException If PrepopulateBlobCache cannot be found for the + * provided byteIdentifier + */ + public static PrepopulateBlobCache getPrepopulateBlobCache(byte byteIdentifier) { + for (final PrepopulateBlobCache prepopulateBlobCache : PrepopulateBlobCache.values()) { + if (prepopulateBlobCache.getValue() == byteIdentifier) { + return prepopulateBlobCache; + } + } + + throw new IllegalArgumentException("Illegal value provided for PrepopulateBlobCache."); + } + + /** + *
<p>Get a PrepopulateBlobCache value based on the string key in the C++ options output. + * This gets used in support of getting options into Java from an options string, + * which is generated at the C++ level. + * </p>
+ + * @param internalName the internal (C++) name by which the option is known. + * + * @return PrepopulateBlobCache instance (optional) + */ + static PrepopulateBlobCache getFromInternal(final String internalName) { + for (final PrepopulateBlobCache prepopulateBlobCache : PrepopulateBlobCache.values()) { + if (prepopulateBlobCache.internalName_.equals(internalName)) { + return prepopulateBlobCache; + } + } + + throw new IllegalArgumentException( + "Illegal internalName '" + internalName + "' provided for PrepopulateBlobCache."); + } + + /** + *
<p>Returns the byte value of the enumeration value.</p>
+ * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + /** + *
<p>Returns the library name of the prepopulate blob cache mode + * identified by the enumeration value.</p>
+ * + * @return library name + */ + public String getLibraryName() { + return libraryName_; + } + + PrepopulateBlobCache(final byte value, final String libraryName, final String internalName) { + value_ = value; + libraryName_ = libraryName; + internalName_ = internalName; + } + + private final byte value_; + private final String libraryName_; + private final String internalName_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Priority.java b/src/rocksdb/java/src/main/java/org/rocksdb/Priority.java new file mode 100644 index 000000000..34a56edcb --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Priority.java @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The Thread Pool priority. + */ +public enum Priority { + BOTTOM((byte) 0x0), + LOW((byte) 0x1), + HIGH((byte)0x2), + TOTAL((byte)0x3); + + private final byte value; + + Priority(final byte value) { + this.value = value; + } + + /** + *
<p>Returns the byte value of the enumeration value.</p>
+ + * @return byte representation + */ + byte getValue() { + return value; + } + + /** + * Get Priority by byte value. + * + * @param value byte representation of Priority. + * + * @return {@link org.rocksdb.Priority} instance. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + static Priority getPriority(final byte value) { + for (final Priority priority : Priority.values()) { + if (priority.getValue() == value) { + return priority; + } + } + throw new IllegalArgumentException("Illegal value provided for Priority."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Range.java b/src/rocksdb/java/src/main/java/org/rocksdb/Range.java new file mode 100644 index 000000000..74c85e5f0 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Range.java @@ -0,0 +1,19 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Range from start to limit. + */ +public class Range { + final Slice start; + final Slice limit; + + public Range(final Slice start, final Slice limit) { + this.start = start; + this.limit = limit; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java new file mode 100644 index 000000000..c2b8a0fd9 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiter.java @@ -0,0 +1,227 @@ +// Copyright (c) 2015, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * RateLimiter, which is used to control the write rate of flush and + * compaction. + * + * @since 3.10.0 + */ +public class RateLimiter extends RocksObject { + public static final long DEFAULT_REFILL_PERIOD_MICROS = 100 * 1000; + public static final int DEFAULT_FAIRNESS = 10; + public static final RateLimiterMode DEFAULT_MODE = + RateLimiterMode.WRITES_ONLY; + public static final boolean DEFAULT_AUTOTUNE = false; + + /** + * RateLimiter constructor + * + * @param rateBytesPerSecond this is the only parameter you want to set + * most of the time. It controls the total write rate of compaction + * and flush in bytes per second. Currently, RocksDB does not enforce + * a rate limit for anything other than flush and compaction, e.g. write to + * WAL. + */ + public RateLimiter(final long rateBytesPerSecond) { + this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS, + DEFAULT_MODE, DEFAULT_AUTOTUNE); + } + + /** + * RateLimiter constructor + * + * @param rateBytesPerSecond this is the only parameter you want to set + * most of the time. It controls the total write rate of compaction + * and flush in bytes per second. Currently, RocksDB does not enforce + * a rate limit for anything other than flush and compaction, e.g. write to + * WAL. + * @param refillPeriodMicros this controls how often tokens are refilled. For + * example, + * when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to + * 100ms, then 1MB is refilled every 100ms internally. Larger value can + * lead to burstier writes while smaller value introduces more CPU + * overhead. The default of 100,000 microseconds (100ms) should work for most cases.
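+ * + * <p>An illustrative sketch (not part of the upstream file), assuming an existing + * {@code Options} instance: + * <pre>{@code + * options.setRateLimiter(new RateLimiter(10 * 1024 * 1024, 100 * 1000)); // 10 MiB/s, 100ms refill + * }</pre>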
+ */ + public RateLimiter(final long rateBytesPerSecond, + final long refillPeriodMicros) { + this(rateBytesPerSecond, refillPeriodMicros, DEFAULT_FAIRNESS, DEFAULT_MODE, + DEFAULT_AUTOTUNE); + } + + /** + * RateLimiter constructor + * + * @param rateBytesPerSecond this is the only parameter you want to set + * most of the time. It controls the total write rate of compaction + * and flush in bytes per second. Currently, RocksDB does not enforce + * a rate limit for anything other than flush and compaction, e.g. write to + * WAL. + * @param refillPeriodMicros this controls how often tokens are refilled. For + * example, + * when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to + * 100ms, then 1MB is refilled every 100ms internally. A larger value can + * lead to burstier writes while a smaller value introduces more CPU + * overhead. The default of 100,000µs (100ms) should work for most cases. + * @param fairness RateLimiter accepts high-pri requests and low-pri requests. + * A low-pri request is usually blocked in favor of a high-pri request. + * Currently, RocksDB assigns low-pri to requests from compaction and + * high-pri to requests from flush. Low-pri requests can get blocked if + * flush requests come in continuously. This fairness parameter grants + * low-pri requests permission by a 1/fairness chance even though high-pri + * requests exist, to avoid starvation. + * Leaving it at the default of 10 should be fine. + */ + public RateLimiter(final long rateBytesPerSecond, + final long refillPeriodMicros, final int fairness) { + this(rateBytesPerSecond, refillPeriodMicros, fairness, DEFAULT_MODE, + DEFAULT_AUTOTUNE); + } + + /** + * RateLimiter constructor + * + * @param rateBytesPerSecond this is the only parameter you want to set + * most of the time. It controls the total write rate of compaction + * and flush in bytes per second. Currently, RocksDB does not enforce + * a rate limit for anything other than flush and compaction, e.g. write to + * WAL. + * @param refillPeriodMicros this controls how often tokens are refilled. For + * example, + * when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to + * 100ms, then 1MB is refilled every 100ms internally. A larger value can + * lead to burstier writes while a smaller value introduces more CPU + * overhead. The default of 100,000µs (100ms) should work for most cases. + * @param fairness RateLimiter accepts high-pri requests and low-pri requests. + * A low-pri request is usually blocked in favor of a high-pri request. + * Currently, RocksDB assigns low-pri to requests from compaction and + * high-pri to requests from flush. Low-pri requests can get blocked if + * flush requests come in continuously. This fairness parameter grants + * low-pri requests permission by a 1/fairness chance even though high-pri + * requests exist, to avoid starvation. + * Leaving it at the default of 10 should be fine. + * @param rateLimiterMode indicates which types of operations count against + * the limit. + */ + public RateLimiter(final long rateBytesPerSecond, + final long refillPeriodMicros, final int fairness, + final RateLimiterMode rateLimiterMode) { + this(rateBytesPerSecond, refillPeriodMicros, fairness, rateLimiterMode, + DEFAULT_AUTOTUNE); + } + + /** + * RateLimiter constructor + * + * @param rateBytesPerSecond this is the only parameter you want to set + * most of the time. It controls the total write rate of compaction + * and flush in bytes per second. Currently, RocksDB does not enforce + * a rate limit for anything other than flush and compaction, e.g.
write to + * WAL. + * @param refillPeriodMicros this controls how often tokens are refilled. For + * example, + * when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to + * 100ms, then 1MB is refilled every 100ms internally. A larger value can + * lead to burstier writes while a smaller value introduces more CPU + * overhead. The default of 100,000µs (100ms) should work for most cases. + * @param fairness RateLimiter accepts high-pri requests and low-pri requests. + * A low-pri request is usually blocked in favor of a high-pri request. + * Currently, RocksDB assigns low-pri to requests from compaction and + * high-pri to requests from flush. Low-pri requests can get blocked if + * flush requests come in continuously. This fairness parameter grants + * low-pri requests permission by a 1/fairness chance even though high-pri + * requests exist, to avoid starvation. + * Leaving it at the default of 10 should be fine. + * @param rateLimiterMode indicates which types of operations count against + * the limit. + * @param autoTune Enables dynamic adjustment of rate limit within the range + * {@code [rate_bytes_per_sec / 20, rate_bytes_per_sec]}, according to + * the recent demand for background I/O. + */ + public RateLimiter(final long rateBytesPerSecond, + final long refillPeriodMicros, final int fairness, + final RateLimiterMode rateLimiterMode, final boolean autoTune) { + super(newRateLimiterHandle(rateBytesPerSecond, + refillPeriodMicros, fairness, rateLimiterMode.getValue(), autoTune)); + } + + /** + *

This API allows the user to dynamically change the rate limiter's bytes per second. + * REQUIRED: bytes_per_second > 0
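+ * + * <p>An illustrative sketch (editor's addition, not part of the upstream sources; it assumes the + * {@code Options#setRateLimiter} setter added elsewhere in this patch):</p> + * <pre>{@code + * try (final RateLimiter limiter = new RateLimiter(10 * 1024 * 1024); // 10MB/s + * final Options options = new Options().setRateLimiter(limiter)) { + * // ... open and use a DB with these options ... + * limiter.setBytesPerSecond(5 * 1024 * 1024); // tighten the budget at runtime + * } + * }</pre>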

+ * + * @param bytesPerSecond bytes per second. + */ + public void setBytesPerSecond(final long bytesPerSecond) { + assert(isOwningHandle()); + setBytesPerSecond(nativeHandle_, bytesPerSecond); + } + + /** + * Returns the bytes per second. + * + * @return bytes per second. + */ + public long getBytesPerSecond() { + assert(isOwningHandle()); + return getBytesPerSecond(nativeHandle_); + } + + /** + *

Request a token to write bytes. If this request cannot be satisfied, + * the call is blocked. The caller is responsible for making sure + * {@code bytes < GetSingleBurstBytes()}.
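+ * + * <p>A minimal sketch (editor's addition, not upstream; {@code limiter} and + * {@code payloadSize} are hypothetical caller-supplied values):</p> + * <pre>{@code + * final long burst = limiter.getSingleBurstBytes(); + * long remaining = payloadSize; + * while (remaining > 0) { + * // keep each request strictly below the single-burst limit + * final long chunk = Math.min(remaining, burst - 1); + * limiter.request(chunk); // may block until tokens are refilled + * remaining -= chunk; + * } + * }</pre>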

+ * + * @param bytes requested bytes. + */ + public void request(final long bytes) { + assert(isOwningHandle()); + request(nativeHandle_, bytes); + } + + /** + *

Max bytes that can be granted in a single burst.

+ * + * @return the max number of bytes that can be granted in a single burst. + */ + public long getSingleBurstBytes() { + assert(isOwningHandle()); + return getSingleBurstBytes(nativeHandle_); + } + + /** + *

Total bytes that go through the rate limiter.

+ * + * @return total bytes that go through the rate limiter. + */ + public long getTotalBytesThrough() { + assert(isOwningHandle()); + return getTotalBytesThrough(nativeHandle_); + } + + /** + *

Total # of requests that go through the rate limiter.
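+ * + * <p>Editor's sketch (not upstream; {@code limiter} is assumed): combining the two counters to + * derive an average request size:</p> + * <pre>{@code + * final long bytes = limiter.getTotalBytesThrough(); + * final long requests = limiter.getTotalRequests(); + * final long avgBytesPerRequest = requests == 0 ? 0 : bytes / requests; + * }</pre>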

+ * + * @return total # of requests that go through rate limiter. + */ + public long getTotalRequests() { + assert(isOwningHandle()); + return getTotalRequests(nativeHandle_); + } + + private static native long newRateLimiterHandle(final long rateBytesPerSecond, + final long refillPeriodMicros, final int fairness, + final byte rateLimiterMode, final boolean autoTune); + @Override protected final native void disposeInternal(final long handle); + + private native void setBytesPerSecond(final long handle, + final long bytesPerSecond); + private native long getBytesPerSecond(final long handle); + private native void request(final long handle, final long bytes); + private native long getSingleBurstBytes(final long handle); + private native long getTotalBytesThrough(final long handle); + private native long getTotalRequests(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java new file mode 100644 index 000000000..4b029d816 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RateLimiterMode.java @@ -0,0 +1,52 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Mode for {@link RateLimiter#RateLimiter(long, long, int, RateLimiterMode)}. + */ +public enum RateLimiterMode { + READS_ONLY((byte)0x0), + WRITES_ONLY((byte)0x1), + ALL_IO((byte)0x2); + + private final byte value; + + RateLimiterMode(final byte value) { + this.value = value; + } + + /** + *

Returns the byte value of the enumeration's value.

+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + *

Get the RateLimiterMode enumeration value by + * passing the byte identifier to this method.
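+ * + * <p>For example (editor's sketch, not upstream), the byte value round-trips:</p> + * <pre>{@code + * final byte b = RateLimiterMode.ALL_IO.getValue(); // 0x2 + * assert RateLimiterMode.getRateLimiterMode(b) == RateLimiterMode.ALL_IO; + * }</pre>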

+ * + * @param byteIdentifier of RateLimiterMode. + * + * @return RateLimiterMode instance. + * + * @throws IllegalArgumentException if the rate limiter mode for the byteIdentifier + * cannot be found + */ + public static RateLimiterMode getRateLimiterMode(final byte byteIdentifier) { + for (final RateLimiterMode rateLimiterMode : RateLimiterMode.values()) { + if (rateLimiterMode.getValue() == byteIdentifier) { + return rateLimiterMode; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for RateLimiterMode."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java new file mode 100755 index 000000000..0836f0f18 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java @@ -0,0 +1,831 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The class that controls the get behavior. + * + * Note that dispose() must be called before an Options instance + * becomes out-of-scope to release the allocated memory in c++. + */ +public class ReadOptions extends RocksObject { + public ReadOptions() { + super(newReadOptions()); + } + + /** + * @param verifyChecksums verification will be performed on every read + * when set to true + * @param fillCache if true, then fill-cache behavior will be performed. + */ + public ReadOptions(final boolean verifyChecksums, final boolean fillCache) { + super(newReadOptions(verifyChecksums, fillCache)); + } + + /** + * Copy constructor. + * + * NOTE: This does a shallow copy, which means snapshot, iterate_upper_bound + * and other pointers will be cloned! + * + * @param other The ReadOptions to copy. + */ + public ReadOptions(ReadOptions other) { + super(copyReadOptions(other.nativeHandle_)); + this.iterateLowerBoundSlice_ = other.iterateLowerBoundSlice_; + this.iterateUpperBoundSlice_ = other.iterateUpperBoundSlice_; + this.timestampSlice_ = other.timestampSlice_; + this.iterStartTs_ = other.iterStartTs_; + } + + /** + * If true, all data read from underlying storage will be + * verified against corresponding checksums. + * Default: true + * + * @return true if checksum verification is on. + */ + public boolean verifyChecksums() { + assert(isOwningHandle()); + return verifyChecksums(nativeHandle_); + } + + /** + * If true, all data read from underlying storage will be + * verified against corresponding checksums. + * Default: true + * + * @param verifyChecksums if true, then checksum verification + * will be performed on every read. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setVerifyChecksums( + final boolean verifyChecksums) { + assert(isOwningHandle()); + setVerifyChecksums(nativeHandle_, verifyChecksums); + return this; + } + + // TODO(yhchiang): this option seems to be block-based table only. + // move this to a better place? + /** + * Fill the cache when loading the block-based sst formatted db. + * Callers may wish to set this field to false for bulk scans. + * Default: true + * + * @return true if the fill-cache behavior is on. + */ + public boolean fillCache() { + assert(isOwningHandle()); + return fillCache(nativeHandle_); + } + + /** + * Fill the cache when loading the block-based sst formatted db. + * Callers may wish to set this field to false for bulk scans.
+ * Default: true + * + * @param fillCache if true, then fill-cache behavior will be + * performed. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setFillCache(final boolean fillCache) { + assert(isOwningHandle()); + setFillCache(nativeHandle_, fillCache); + return this; + } + + /** + * Returns the currently assigned Snapshot instance. + * + * @return the Snapshot assigned to this instance; null if no Snapshot + * is assigned. + */ + public Snapshot snapshot() { + assert(isOwningHandle()); + long snapshotHandle = snapshot(nativeHandle_); + if (snapshotHandle != 0) { + return new Snapshot(snapshotHandle); + } + return null; + } + + /** + *

If "snapshot" is non-nullptr, read as of the supplied snapshot + * (which must belong to the DB that is being read and which must + * not have been released). If "snapshot" is nullptr, use an implicit + * snapshot of the state at the beginning of this read operation.

+ *

Default: null
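+ * + * <p>A typical pattern (editor's sketch, not upstream; {@code db} and {@code key} are assumed to + * exist):</p> + * <pre>{@code + * final Snapshot snapshot = db.getSnapshot(); + * try (final ReadOptions readOptions = new ReadOptions()) { + * readOptions.setSnapshot(snapshot); + * final byte[] value = db.get(readOptions, key); // reads the snapshot's view + * } finally { + * db.releaseSnapshot(snapshot); + * } + * }</pre>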

+ * + * @param snapshot {@link Snapshot} instance + * @return the reference to the current ReadOptions. + */ + public ReadOptions setSnapshot(final Snapshot snapshot) { + assert(isOwningHandle()); + if (snapshot != null) { + setSnapshot(nativeHandle_, snapshot.nativeHandle_); + } else { + setSnapshot(nativeHandle_, 0l); + } + return this; + } + + /** + * Returns the current read tier. + * + * @return the read tier in use, by default {@link ReadTier#READ_ALL_TIER} + */ + public ReadTier readTier() { + assert(isOwningHandle()); + return ReadTier.getReadTier(readTier(nativeHandle_)); + } + + /** + * Specify if this read request should process data that ALREADY + * resides on a particular cache. If the required data is not + * found at the specified cache, then {@link RocksDBException} is thrown. + * + * @param readTier {@link ReadTier} instance + * @return the reference to the current ReadOptions. + */ + public ReadOptions setReadTier(final ReadTier readTier) { + assert(isOwningHandle()); + setReadTier(nativeHandle_, readTier.getValue()); + return this; + } + + /** + * Specify to create a tailing iterator -- a special iterator that has a + * view of the complete database (i.e. it can also be used to read newly + * added data) and is optimized for sequential reads. It will return records + * that were inserted into the database after the creation of the iterator. + * Default: false + * + * Not supported in {@code ROCKSDB_LITE} mode! + * + * @return true if tailing iterator is enabled. + */ + public boolean tailing() { + assert(isOwningHandle()); + return tailing(nativeHandle_); + } + + /** + * Specify to create a tailing iterator -- a special iterator that has a + * view of the complete database (i.e. it can also be used to read newly + * added data) and is optimized for sequential reads. It will return records + * that were inserted into the database after the creation of the iterator. + * Default: false + * Not supported in ROCKSDB_LITE mode! + * + * @param tailing if true, then tailing iterator will be enabled. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setTailing(final boolean tailing) { + assert(isOwningHandle()); + setTailing(nativeHandle_, tailing); + return this; + } + + /** + * Returns whether managed iterators will be used. + * + * @return the setting of whether managed iterators will be used, + * by default false + * + * @deprecated This options is not used anymore. + */ + @Deprecated + public boolean managed() { + assert(isOwningHandle()); + return managed(nativeHandle_); + } + + /** + * Specify to create a managed iterator -- a special iterator that + * uses less resources by having the ability to free its underlying + * resources on request. + * + * @param managed if true, then managed iterators will be enabled. + * @return the reference to the current ReadOptions. + * + * @deprecated This options is not used anymore. + */ + @Deprecated + public ReadOptions setManaged(final boolean managed) { + assert(isOwningHandle()); + setManaged(nativeHandle_, managed); + return this; + } + + /** + * Returns whether a total seek order will be used + * + * @return the setting of whether a total seek order will be used + */ + public boolean totalOrderSeek() { + assert(isOwningHandle()); + return totalOrderSeek(nativeHandle_); + } + + /** + * Enable a total order seek regardless of index format (e.g. hash index) + * used in the table. Some table format (e.g. plain table) may not support + * this option. 
+ * + * @param totalOrderSeek if true, then total order seek will be enabled. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setTotalOrderSeek(final boolean totalOrderSeek) { + assert(isOwningHandle()); + setTotalOrderSeek(nativeHandle_, totalOrderSeek); + return this; + } + + /** + * Returns whether the iterator only iterates over the same prefix as the seek + * + * @return the setting of whether the iterator only iterates over the same + * prefix as the seek, default is false + */ + public boolean prefixSameAsStart() { + assert(isOwningHandle()); + return prefixSameAsStart(nativeHandle_); + } + + /** + * Enforce that the iterator only iterates over the same prefix as the seek. + * This option is effective only for prefix seeks, i.e. prefix_extractor is + * non-null for the column family and {@link #totalOrderSeek()} is false. + * Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only + * works within a prefix but in both directions. + * + * @param prefixSameAsStart if true, then the iterator only iterates over the + * same prefix as the seek + * @return the reference to the current ReadOptions. + */ + public ReadOptions setPrefixSameAsStart(final boolean prefixSameAsStart) { + assert(isOwningHandle()); + setPrefixSameAsStart(nativeHandle_, prefixSameAsStart); + return this; + } + + /** + * Returns whether the blocks loaded by the iterator will be pinned in memory + * + * @return the setting of whether the blocks loaded by the iterator will be + * pinned in memory + */ + public boolean pinData() { + assert(isOwningHandle()); + return pinData(nativeHandle_); + } + + /** + * Keep the blocks loaded by the iterator pinned in memory as long as the + * iterator is not deleted, If used when reading from tables created with + * BlockBasedTableOptions::use_delta_encoding = false, + * Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to + * return 1. + * + * @param pinData if true, the blocks loaded by the iterator will be pinned + * @return the reference to the current ReadOptions. + */ + public ReadOptions setPinData(final boolean pinData) { + assert(isOwningHandle()); + setPinData(nativeHandle_, pinData); + return this; + } + + /** + * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we + * schedule a background job in the flush job queue and delete obsolete files + * in background. + * + * Default: false + * + * @return true when PurgeObsoleteFile is called in CleanupIteratorState + */ + public boolean backgroundPurgeOnIteratorCleanup() { + assert(isOwningHandle()); + return backgroundPurgeOnIteratorCleanup(nativeHandle_); + } + + /** + * If true, when PurgeObsoleteFile is called in CleanupIteratorState, we + * schedule a background job in the flush job queue and delete obsolete files + * in background. + * + * Default: false + * + * @param backgroundPurgeOnIteratorCleanup true when PurgeObsoleteFile is + * called in CleanupIteratorState + * @return the reference to the current ReadOptions. + */ + public ReadOptions setBackgroundPurgeOnIteratorCleanup( + final boolean backgroundPurgeOnIteratorCleanup) { + assert(isOwningHandle()); + setBackgroundPurgeOnIteratorCleanup(nativeHandle_, + backgroundPurgeOnIteratorCleanup); + return this; + } + + /** + * If non-zero, NewIterator will create a new table reader which + * performs reads of the given size. Using a large size (> 2MB) can + * improve the performance of forward iteration on spinning disks. 
+ * + * Default: 0 + * + * @return The readahead size is bytes + */ + public long readaheadSize() { + assert(isOwningHandle()); + return readaheadSize(nativeHandle_); + } + + /** + * If non-zero, NewIterator will create a new table reader which + * performs reads of the given size. Using a large size (> 2MB) can + * improve the performance of forward iteration on spinning disks. + * + * Default: 0 + * + * @param readaheadSize The readahead size is bytes + * @return the reference to the current ReadOptions. + */ + public ReadOptions setReadaheadSize(final long readaheadSize) { + assert(isOwningHandle()); + setReadaheadSize(nativeHandle_, readaheadSize); + return this; + } + + /** + * A threshold for the number of keys that can be skipped before failing an + * iterator seek as incomplete. + * + * @return the number of keys that can be skipped + * before failing an iterator seek as incomplete. + */ + public long maxSkippableInternalKeys() { + assert(isOwningHandle()); + return maxSkippableInternalKeys(nativeHandle_); + } + + /** + * A threshold for the number of keys that can be skipped before failing an + * iterator seek as incomplete. The default value of 0 should be used to + * never fail a request as incomplete, even on skipping too many keys. + * + * Default: 0 + * + * @param maxSkippableInternalKeys the number of keys that can be skipped + * before failing an iterator seek as incomplete. + * + * @return the reference to the current ReadOptions. + */ + public ReadOptions setMaxSkippableInternalKeys( + final long maxSkippableInternalKeys) { + assert(isOwningHandle()); + setMaxSkippableInternalKeys(nativeHandle_, maxSkippableInternalKeys); + return this; + } + + /** + * If true, keys deleted using the DeleteRange() API will be visible to + * readers until they are naturally deleted during compaction. This improves + * read performance in DBs with many range deletions. + * + * Default: false + * + * @return true if keys deleted using the DeleteRange() API will be visible + */ + public boolean ignoreRangeDeletions() { + assert(isOwningHandle()); + return ignoreRangeDeletions(nativeHandle_); + } + + /** + * If true, keys deleted using the DeleteRange() API will be visible to + * readers until they are naturally deleted during compaction. This improves + * read performance in DBs with many range deletions. + * + * Default: false + * + * @param ignoreRangeDeletions true if keys deleted using the DeleteRange() + * API should be visible + * @return the reference to the current ReadOptions. + */ + public ReadOptions setIgnoreRangeDeletions(final boolean ignoreRangeDeletions) { + assert(isOwningHandle()); + setIgnoreRangeDeletions(nativeHandle_, ignoreRangeDeletions); + return this; + } + + /** + * Defines the smallest key at which the backward + * iterator can return an entry. Once the bound is passed, + * {@link RocksIterator#isValid()} will be false. + * + * The lower bound is inclusive i.e. the bound value is a valid + * entry. + * + * If prefix_extractor is not null, the Seek target and `iterate_lower_bound` + * need to have the same prefix. This is because ordering is not guaranteed + * outside of prefix domain. + * + * Default: null + * + * @param iterateLowerBound Slice representing the lower bound + * @return the reference to the current ReadOptions. + */ + public ReadOptions setIterateLowerBound(final AbstractSlice iterateLowerBound) { + assert(isOwningHandle()); + setIterateLowerBound( + nativeHandle_, iterateLowerBound == null ? 
0 : iterateLowerBound.getNativeHandle()); + // Hold onto a reference so it doesn't get garbage collected out from under us. + iterateLowerBoundSlice_ = iterateLowerBound; + return this; + } + + /** + * Returns the smallest key at which the backward + * iterator can return an entry. + * + * The lower bound is inclusive i.e. the bound value is a valid entry. + * + * @return the smallest key, or null if there is no lower bound defined. + */ + public Slice iterateLowerBound() { + assert(isOwningHandle()); + final long lowerBoundSliceHandle = iterateLowerBound(nativeHandle_); + if (lowerBoundSliceHandle != 0) { + // Disown the new slice - it's owned by the C++ side of the JNI boundary + // from the perspective of this method. + return new Slice(lowerBoundSliceHandle, false); + } + return null; + } + + /** + * Defines the extent up to which the forward iterator + * can returns entries. Once the bound is reached, + * {@link RocksIterator#isValid()} will be false. + * + * The upper bound is exclusive i.e. the bound value is not a valid entry. + * + * If prefix_extractor is not null, the Seek target and iterate_upper_bound + * need to have the same prefix. This is because ordering is not guaranteed + * outside of prefix domain. + * + * Default: null + * + * @param iterateUpperBound Slice representing the upper bound + * @return the reference to the current ReadOptions. + */ + public ReadOptions setIterateUpperBound(final AbstractSlice iterateUpperBound) { + assert(isOwningHandle()); + setIterateUpperBound( + nativeHandle_, iterateUpperBound == null ? 0 : iterateUpperBound.getNativeHandle()); + // Hold onto a reference so it doesn't get garbage collected out from under us. + iterateUpperBoundSlice_ = iterateUpperBound; + return this; + } + + /** + * Returns the largest key at which the forward + * iterator can return an entry. + * + * The upper bound is exclusive i.e. the bound value is not a valid entry. + * + * @return the largest key, or null if there is no upper bound defined. + */ + public Slice iterateUpperBound() { + assert(isOwningHandle()); + final long upperBoundSliceHandle = iterateUpperBound(nativeHandle_); + if (upperBoundSliceHandle != 0) { + // Disown the new slice - it's owned by the C++ side of the JNI boundary + // from the perspective of this method. + return new Slice(upperBoundSliceHandle, false); + } + return null; + } + + /** + * A callback to determine whether relevant keys for this scan exist in a + * given table based on the table's properties. The callback is passed the + * properties of each table during iteration. If the callback returns false, + * the table will not be scanned. This option only affects Iterators and has + * no impact on point lookups. + * + * Default: null (every table will be scanned) + * + * @param tableFilter the table filter for the callback. + * + * @return the reference to the current ReadOptions. + */ + public ReadOptions setTableFilter(final AbstractTableFilter tableFilter) { + assert(isOwningHandle()); + setTableFilter(nativeHandle_, tableFilter.nativeHandle_); + return this; + } + + /** + * When true, by default use total_order_seek = true, and RocksDB can + * selectively enable prefix seek mode if won't generate a different result + * from total_order_seek, based on seek key, and iterator upper bound. + * Not supported in ROCKSDB_LITE mode, in the way that even with value true + * prefix mode is not used. + * Default: false + * + * @return true if auto prefix mode is set. 
+ * + */ + public boolean autoPrefixMode() { + assert (isOwningHandle()); + return autoPrefixMode(nativeHandle_); + } + + /** + * When true, by default use total_order_seek = true, and RocksDB can + * selectively enable prefix seek mode if it won't generate a different result + * from total_order_seek, based on seek key, and iterator upper bound. + * Not supported in ROCKSDB_LITE mode, in the way that even with value true + * prefix mode is not used. + * Default: false + * @param mode auto prefix mode + * @return the reference to the current ReadOptions. + */ + public ReadOptions setAutoPrefixMode(final boolean mode) { + assert (isOwningHandle()); + setAutoPrefixMode(nativeHandle_, mode); + return this; + } + + /** + * Timestamp of operation. Read should return the latest data visible to the + * specified timestamp. All timestamps of the same database must be of the + * same length and format. The user is responsible for providing a customized + * compare function via Comparator to order {@code <key, timestamp>} tuples. + * For iterator, iter_start_ts is the lower bound (older) and timestamp + * serves as the upper bound. Versions of the same record that fall in + * the timestamp range will be returned. If iter_start_ts is nullptr, + * only the most recent version visible to timestamp is returned. + * The user-specified timestamp feature is still under active development, + * and the API is subject to change. + * + * Default: null + * @see #iterStartTs() + * @return Reference to timestamp or null if there is no timestamp defined. + */ + public Slice timestamp() { + assert (isOwningHandle()); + final long timestampSliceHandle = timestamp(nativeHandle_); + if (timestampSliceHandle != 0) { + return new Slice(timestampSliceHandle); + } else { + return null; + } + } + + /** + * Timestamp of operation. Read should return the latest data visible to the + * specified timestamp. All timestamps of the same database must be of the + * same length and format. The user is responsible for providing a customized + * compare function via Comparator to order {@code <key, timestamp>} tuples. + * For iterator, {@code iter_start_ts} is the lower bound (older) and timestamp + * serves as the upper bound. Versions of the same record that fall in + * the timestamp range will be returned. If iter_start_ts is nullptr, + * only the most recent version visible to timestamp is returned. + * The user-specified timestamp feature is still under active development, + * and the API is subject to change. + * + * Default: null + * @see #setIterStartTs(AbstractSlice) + * @param timestamp Slice representing the timestamp + * @return the reference to the current ReadOptions. + */ + public ReadOptions setTimestamp(final AbstractSlice<?> timestamp) { + assert (isOwningHandle()); + setTimestamp(nativeHandle_, timestamp == null ? 0 : timestamp.getNativeHandle()); + timestampSlice_ = timestamp; + return this; + } + + /** + * Timestamp of operation. Read should return the latest data visible to the + * specified timestamp. All timestamps of the same database must be of the + * same length and format. The user is responsible for providing a customized + * compare function via Comparator to order {@code <key, timestamp>} tuples. + * For iterator, {@code iter_start_ts} is the lower bound (older) and timestamp + * serves as the upper bound. Versions of the same record that fall in + * the timestamp range will be returned. If iter_start_ts is nullptr, + * only the most recent version visible to timestamp is returned.
+ * The user-specified timestamp feature is still under active development, + * and the API is subject to change. + * + * Default: null + * @return Reference to lower bound timestamp or null if there is no lower bound timestamp + * defined. + */ + public Slice iterStartTs() { + assert (isOwningHandle()); + final long iterStartTsHandle = iterStartTs(nativeHandle_); + if (iterStartTsHandle != 0) { + return new Slice(iterStartTsHandle); + } else { + return null; + } + } + + /** + * Timestamp of operation. Read should return the latest data visible to the + * specified timestamp. All timestamps of the same database must be of the + * same length and format. The user is responsible for providing a customized + * compare function via Comparator to order {@code <key, timestamp>} tuples. + * For iterator, {@code iter_start_ts} is the lower bound (older) and timestamp + * serves as the upper bound. Versions of the same record that fall in + * the timestamp range will be returned. If iter_start_ts is nullptr, + * only the most recent version visible to timestamp is returned. + * The user-specified timestamp feature is still under active development, + * and the API is subject to change. + * + * Default: null + * + * @param iterStartTs Reference to lower bound timestamp or null if there is no lower bound + * timestamp defined + * @return the reference to the current ReadOptions. + */ + public ReadOptions setIterStartTs(final AbstractSlice<?> iterStartTs) { + assert (isOwningHandle()); + setIterStartTs(nativeHandle_, iterStartTs == null ? 0 : iterStartTs.getNativeHandle()); + iterStartTs_ = iterStartTs; + return this; + } + + /** + * Deadline for completing an API call (Get/MultiGet/Seek/Next for now) + * in microseconds. + * It should be set to microseconds since epoch, i.e., {@code gettimeofday} or + * equivalent plus allowed duration in microseconds. The best way is to use + * {@code env->NowMicros() + some timeout}. + * This is best effort. The call may exceed the deadline if there is IO + * involved and the file system doesn't support deadlines, or due to + * checking for deadline periodically rather than for every key if + * processing a batch. + * + * @return deadline time in microseconds + */ + public long deadline() { + assert (isOwningHandle()); + return deadline(nativeHandle_); + } + + /** + * Deadline for completing an API call (Get/MultiGet/Seek/Next for now) + * in microseconds. + * It should be set to microseconds since epoch, i.e., {@code gettimeofday} or + * equivalent plus allowed duration in microseconds. The best way is to use + * {@code env->NowMicros() + some timeout}. + * This is best effort. The call may exceed the deadline if there is IO + * involved and the file system doesn't support deadlines, or due to + * checking for deadline periodically rather than for every key if + * processing a batch. + * + * @param deadlineTime deadline time in microseconds. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setDeadline(final long deadlineTime) { + assert (isOwningHandle()); + setDeadline(nativeHandle_, deadlineTime); + return this; + } + + /** + * A timeout in microseconds to be passed to the underlying FileSystem for + * reads. As opposed to deadline, this determines the timeout for each + * individual file read request. If a MultiGet/Get/Seek/Next etc. call + * results in multiple reads, each read can last up to io_timeout us.
+ * @return ioTimeout time in microseconds + */ + public long ioTimeout() { + assert (isOwningHandle()); + return ioTimeout(nativeHandle_); + } + + /** + * A timeout in microseconds to be passed to the underlying FileSystem for + * reads. As opposed to deadline, this determines the timeout for each + * individual file read request. If a MultiGet/Get/Seek/Next etc. call + * results in multiple reads, each read can last up to io_timeout us. + * + * @param ioTimeout time in microseconds. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setIoTimeout(final long ioTimeout) { + assert (isOwningHandle()); + setIoTimeout(nativeHandle_, ioTimeout); + return this; + } + + /** + * It limits the maximum cumulative value size of the keys in batch while + * reading through MultiGet. Once the cumulative value size exceeds this + * soft limit then all the remaining keys are returned with status Aborted. + * + * Default: {@code std::numeric_limits<uint64_t>::max()} + * @return actual valueSizeSoftLimit + */ + public long valueSizeSoftLimit() { + assert (isOwningHandle()); + return valueSizeSoftLimit(nativeHandle_); + } + + /** + * It limits the maximum cumulative value size of the keys in batch while + * reading through MultiGet. Once the cumulative value size exceeds this + * soft limit then all the remaining keys are returned with status Aborted. + * + * Default: {@code std::numeric_limits<uint64_t>::max()} + * + * @param valueSizeSoftLimit the maximum cumulative value size of the keys + * @return the reference to the current ReadOptions + */ + public ReadOptions setValueSizeSoftLimit(final long valueSizeSoftLimit) { + assert (isOwningHandle()); + setValueSizeSoftLimit(nativeHandle_, valueSizeSoftLimit); + return this; + } + + // instance variables + // NOTE: If you add new member variables, please update the copy constructor above! + // + // Hold a reference to any iterate lower or upper bound that was set on this + // object until we're destroyed or it's overwritten. That way the caller can + // freely leave scope without us losing the Java Slice object, which during + // close() would also reap its associated rocksdb::Slice native object since + // it is possibly (likely) an owning handle.
+ private AbstractSlice iterateLowerBoundSlice_; + private AbstractSlice iterateUpperBoundSlice_; + private AbstractSlice timestampSlice_; + private AbstractSlice iterStartTs_; + + private native static long newReadOptions(); + private native static long newReadOptions(final boolean verifyChecksums, + final boolean fillCache); + private native static long copyReadOptions(long handle); + @Override protected final native void disposeInternal(final long handle); + + private native boolean verifyChecksums(long handle); + private native void setVerifyChecksums(long handle, boolean verifyChecksums); + private native boolean fillCache(long handle); + private native void setFillCache(long handle, boolean fillCache); + private native long snapshot(long handle); + private native void setSnapshot(long handle, long snapshotHandle); + private native byte readTier(long handle); + private native void setReadTier(long handle, byte readTierValue); + private native boolean tailing(long handle); + private native void setTailing(long handle, boolean tailing); + private native boolean managed(long handle); + private native void setManaged(long handle, boolean managed); + private native boolean totalOrderSeek(long handle); + private native void setTotalOrderSeek(long handle, boolean totalOrderSeek); + private native boolean prefixSameAsStart(long handle); + private native void setPrefixSameAsStart(long handle, boolean prefixSameAsStart); + private native boolean pinData(long handle); + private native void setPinData(long handle, boolean pinData); + private native boolean backgroundPurgeOnIteratorCleanup(final long handle); + private native void setBackgroundPurgeOnIteratorCleanup(final long handle, + final boolean backgroundPurgeOnIteratorCleanup); + private native long readaheadSize(final long handle); + private native void setReadaheadSize(final long handle, + final long readaheadSize); + private native long maxSkippableInternalKeys(final long handle); + private native void setMaxSkippableInternalKeys(final long handle, + final long maxSkippableInternalKeys); + private native boolean ignoreRangeDeletions(final long handle); + private native void setIgnoreRangeDeletions(final long handle, + final boolean ignoreRangeDeletions); + private native void setIterateUpperBound(final long handle, + final long upperBoundSliceHandle); + private native long iterateUpperBound(final long handle); + private native void setIterateLowerBound(final long handle, + final long lowerBoundSliceHandle); + private native long iterateLowerBound(final long handle); + private native void setTableFilter(final long handle, final long tableFilterHandle); + private native boolean autoPrefixMode(final long handle); + private native void setAutoPrefixMode(final long handle, final boolean autoPrefixMode); + private native long timestamp(final long handle); + private native void setTimestamp(final long handle, final long timestampSliceHandle); + private native long iterStartTs(final long handle); + private native void setIterStartTs(final long handle, final long iterStartTsHandle); + private native long deadline(final long handle); + private native void setDeadline(final long handle, final long deadlineTime); + private native long ioTimeout(final long handle); + private native void setIoTimeout(final long handle, final long ioTimeout); + private native long valueSizeSoftLimit(final long handle); + private native void setValueSizeSoftLimit(final long handle, final long softLimit); +} diff --git 
a/src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java b/src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java new file mode 100644 index 000000000..78f83f6ad --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * RocksDB {@link ReadOptions} read tiers. + */ +public enum ReadTier { + READ_ALL_TIER((byte)0), + BLOCK_CACHE_TIER((byte)1), + PERSISTED_TIER((byte)2), + MEMTABLE_TIER((byte)3); + + private final byte value; + + ReadTier(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get ReadTier by byte value. + * + * @param value byte representation of ReadTier. + * + * @return {@link org.rocksdb.ReadTier} instance or null. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static ReadTier getReadTier(final byte value) { + for (final ReadTier readTier : ReadTier.values()) { + if (readTier.getValue() == value){ + return readTier; + } + } + throw new IllegalArgumentException("Illegal value provided for ReadTier."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java new file mode 100644 index 000000000..6ee81d858 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -0,0 +1,18 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++ + */ +public class RemoveEmptyValueCompactionFilter + extends AbstractCompactionFilter { + public RemoveEmptyValueCompactionFilter() { + super(createNewRemoveEmptyValueCompactionFilter0()); + } + + private native static long createNewRemoveEmptyValueCompactionFilter0(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java new file mode 100644 index 000000000..54dc0e61c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * RestoreOptions to control the behavior of restore. + * + * Note that dispose() must be called before this instance become out-of-scope + * to release the allocated memory in c++. + * + */ +public class RestoreOptions extends RocksObject { + /** + * Constructor + * + * @param keepLogFiles If true, restore won't overwrite the existing log files + * in wal_dir. It will also move all log files from archive directory to + * wal_dir. 
Use this option in combination with + * BackupEngineOptions::backup_log_files = false for persisting in-memory + * databases. + * Default: false + */ + public RestoreOptions(final boolean keepLogFiles) { + super(newRestoreOptions(keepLogFiles)); + } + + private native static long newRestoreOptions(boolean keepLogFiles); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java new file mode 100644 index 000000000..2709a5d59 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ReusedSynchronisationType.java @@ -0,0 +1,65 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * Determines the type of synchronisation primitive used + * in native code. + */ +public enum ReusedSynchronisationType { + /** + * Standard mutex. + */ + MUTEX((byte)0x0), + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switches when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + */ + ADAPTIVE_MUTEX((byte)0x1), + + /** + * There is a reused buffer per-thread. + */ + THREAD_LOCAL((byte)0x2); + + private final byte value; + + ReusedSynchronisationType(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumeration's value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get ReusedSynchronisationType by byte value. + * + * @param value byte representation of ReusedSynchronisationType. + * + * @return {@link org.rocksdb.ReusedSynchronisationType} instance. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static ReusedSynchronisationType getReusedSynchronisationType( + final byte value) { + for (final ReusedSynchronisationType reusedSynchronisationType + : ReusedSynchronisationType.values()) { + if (reusedSynchronisationType.getValue() == value) { + return reusedSynchronisationType; + } + } + throw new IllegalArgumentException( + "Illegal value provided for ReusedSynchronisationType."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java new file mode 100644 index 000000000..8d7a867ee --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksCallbackObject.java @@ -0,0 +1,73 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; + +/** + * RocksCallbackObject is similar to {@link RocksObject} but varies + * in its construction as it is designed for Java objects which have functions + * which are called from C++ via JNI. + * + * RocksCallbackObject is the base-class of any RocksDB class that acts as a + * callback from some underlying native C++ {@code rocksdb} object.
+ * + * The use of {@code RocksObject} should always be preferred over + * {@link RocksCallbackObject} if callbacks are not required. + */ +public abstract class RocksCallbackObject extends + AbstractImmutableNativeReference { + + protected final long nativeHandle_; + + protected RocksCallbackObject(final long... nativeParameterHandles) { + super(true); + this.nativeHandle_ = initializeNative(nativeParameterHandles); + } + + /** + * Given a list of RocksCallbackObjects, it returns a list + * of the native handles of the underlying objects. + * + * @param objectList the rocks callback objects + * + * @return the native handles + */ + static /* @Nullable */ long[] toNativeHandleList( + /* @Nullable */ final List<? extends RocksCallbackObject> objectList) { + if (objectList == null) { + return null; + } + final int len = objectList.size(); + final long[] handleList = new long[len]; + for (int i = 0; i < len; i++) { + handleList[i] = objectList.get(i).nativeHandle_; + } + return handleList; + } + + /** + * Construct the Native C++ object which will call back + * to our object methods + * + * @param nativeParameterHandles An array of native handles for any parameter + * objects that are needed during construction + * + * @return The native handle of the C++ object which will call back to us + */ + protected abstract long initializeNative( + final long... nativeParameterHandles); + + /** + * Deletes underlying C++ native callback object pointer + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + private native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java new file mode 100644 index 000000000..77484288f --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java @@ -0,0 +1,4694 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import org.rocksdb.util.Environment; + +/** + * A RocksDB is a persistent ordered map from keys to values. It is safe for + * concurrent access from multiple threads without any external synchronization. + * All methods of this class could potentially throw RocksDBException, which + * indicates something went wrong on the RocksDB library side and the call failed. + */ +public class RocksDB extends RocksObject { + public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes(UTF_8); + public static final int NOT_FOUND = -1; + + private enum LibraryState { + NOT_LOADED, + LOADING, + LOADED + } + + private static final AtomicReference<LibraryState> libraryLoaded = + new AtomicReference<>(LibraryState.NOT_LOADED); + + static { + RocksDB.loadLibrary(); + } + + private final List<ColumnFamilyHandle> ownedColumnFamilyHandles = new ArrayList<>(); + + /** + * Loads the necessary library files. + * Calling this method twice will have no effect. + * By default the method extracts the shared library for loading at + * java.io.tmpdir; however, you can override this temporary location by + * setting the environment variable ROCKSDB_SHAREDLIB_DIR.
+ */ + public static void loadLibrary() { + if (libraryLoaded.get() == LibraryState.LOADED) { + return; + } + + if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED, + LibraryState.LOADING)) { + final String tmpDir = System.getenv("ROCKSDB_SHAREDLIB_DIR"); + // loading possibly necessary libraries. + for (final CompressionType compressionType : CompressionType.values()) { + try { + if (compressionType.getLibraryName() != null) { + System.loadLibrary(compressionType.getLibraryName()); + } + } catch (final UnsatisfiedLinkError e) { + // since it may be optional, we ignore its loading failure here. + } + } + try { + NativeLibraryLoader.getInstance().loadLibrary(tmpDir); + } catch (final IOException e) { + libraryLoaded.set(LibraryState.NOT_LOADED); + throw new RuntimeException("Unable to load the RocksDB shared library", + e); + } + + final int encodedVersion = version(); + version = Version.fromEncodedVersion(encodedVersion); + + libraryLoaded.set(LibraryState.LOADED); + return; + } + + while (libraryLoaded.get() == LibraryState.LOADING) { + try { + Thread.sleep(10); + } catch(final InterruptedException e) { + //ignore + } + } + } + + /** + * Tries to load the necessary library files from the given list of + * directories. + * + * @param paths a list of strings where each describes a directory + * of a library. + */ + public static void loadLibrary(final List paths) { + if (libraryLoaded.get() == LibraryState.LOADED) { + return; + } + + if (libraryLoaded.compareAndSet(LibraryState.NOT_LOADED, + LibraryState.LOADING)) { + for (final CompressionType compressionType : CompressionType.values()) { + if (compressionType.equals(CompressionType.NO_COMPRESSION)) { + continue; + } + for (final String path : paths) { + try { + System.load(path + "/" + Environment.getSharedLibraryFileName( + compressionType.getLibraryName())); + break; + } catch (final UnsatisfiedLinkError e) { + // since they are optional, we ignore loading fails. + } + } + } + boolean success = false; + UnsatisfiedLinkError err = null; + for (final String path : paths) { + try { + System.load(path + "/" + + Environment.getJniLibraryFileName("rocksdbjni")); + success = true; + break; + } catch (final UnsatisfiedLinkError e) { + err = e; + } + } + if (!success) { + libraryLoaded.set(LibraryState.NOT_LOADED); + throw err; + } + + final int encodedVersion = version(); + version = Version.fromEncodedVersion(encodedVersion); + + libraryLoaded.set(LibraryState.LOADED); + return; + } + + while (libraryLoaded.get() == LibraryState.LOADING) { + try { + Thread.sleep(10); + } catch(final InterruptedException e) { + //ignore + } + } + } + + public static Version rocksdbVersion() { + return version; + } + + /** + * Private constructor. + * + * @param nativeHandle The native handle of the C++ RocksDB object + */ + protected RocksDB(final long nativeHandle) { + super(nativeHandle); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the default options w/ createIfMissing + * set to true. + * + * @param path the path to the rocksdb. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
+ * @see Options#setCreateIfMissing(boolean) + */ + public static RocksDB open(final String path) throws RocksDBException { + final Options options = new Options(); + options.setCreateIfMissing(true); + return open(options, path); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the specified options and db path and a list + * of column family names. + *

+ * If opened in read-write mode every existing column family name must be + * passed within the list to this method. + * + * If opened in read-only mode only a subset of existing column families need + * be passed to this method. + * + * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If the user doesn't call options dispose explicitly, + * then this options instance will be GC'd automatically. + * + * ColumnFamily handles are disposed when the RocksDB instance is disposed. + *
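+ * + * <p>An illustrative sketch (editor's addition, not upstream; the path and the "users" column + * family name are placeholders):</p> + * <pre>{@code + * final List<ColumnFamilyDescriptor> descriptors = Arrays.asList( + * new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + * new ColumnFamilyDescriptor("users".getBytes(UTF_8))); + * final List<ColumnFamilyHandle> handles = new ArrayList<>(); + * try (final RocksDB db = RocksDB.open("/tmp/testdb", descriptors, handles)) { + * // handles.get(1) addresses the "users" column family + * } + * }</pre>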

+ * + * @param path the path to the rocksdb. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @see DBOptions#setCreateIfMissing(boolean) + */ + public static RocksDB open(final String path, + final List columnFamilyDescriptors, + final List columnFamilyHandles) + throws RocksDBException { + final DBOptions options = new DBOptions(); + return open(options, path, columnFamilyDescriptors, columnFamilyHandles); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the specified options and db path. + * + *

+ * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If the user doesn't call options dispose explicitly, + * then this options instance will be GC'd automatically. + * + * Options instance can be re-used to open multiple DBs if DB statistics are + * not used. If DB statistics are required, then it is recommended to open the DB + * with a new Options instance, as the underlying native statistics instance does not + * use any locks to prevent concurrent updates.
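+ * + * <p>Editor's sketch (not upstream; paths are placeholders) of re-using one Options instance + * across two DBs:</p> + * <pre>{@code + * try (final Options options = new Options().setCreateIfMissing(true); + * final RocksDB db1 = RocksDB.open(options, "/tmp/db1"); + * final RocksDB db2 = RocksDB.open(options, "/tmp/db2")) { + * // both DBs share the same (statistics-free) Options instance + * } + * }</pre>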

+ * + * @param options {@link org.rocksdb.Options} instance. + * @param path the path to the rocksdb. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @see Options#setCreateIfMissing(boolean) + */ + public static RocksDB open(final Options options, final String path) + throws RocksDBException { + // When a non-default Options is used, keeping a reference to the Options + // in RocksDB prevents Java from GC'ing it during the lifetime of + // the currently-created RocksDB. + final RocksDB db = new RocksDB(open(options.nativeHandle_, path)); + db.storeOptionsInstance(options); + return db; + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the specified options and db path and a list + * of column family descriptors. + * <p>If opened in read-write mode, every existing column family name must be + * passed within the list to this method.</p> + * <p>If opened in read-only mode, only a subset of the existing column families + * needs to be passed to this method.</p> + * <p>Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If the user doesn't dispose the options explicitly, + * then this options instance will be GC'd automatically.</p> + * <p>Options instance can be re-used to open multiple DBs if DB statistics are + * not used. If DB statistics are required, then it's recommended to open the DB + * with a new Options instance, as the underlying native statistics instance does + * not use any locks to prevent concurrent updates.</p> + * <p>ColumnFamily handles are disposed when the RocksDB instance is disposed.</p>
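+ * <p>A sketch of opening with explicit DBOptions; the path and the column + * family name are assumed examples:</p> + * <pre>{@code + * final List<ColumnFamilyDescriptor> descriptors = Arrays.asList( + *     new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + *     new ColumnFamilyDescriptor("users".getBytes(StandardCharsets.UTF_8))); + * final List<ColumnFamilyHandle> handles = new ArrayList<>(); + * try (final DBOptions options = new DBOptions() + *          .setCreateIfMissing(true) + *          .setCreateMissingColumnFamilies(true); + *      final RocksDB db = RocksDB.open(options, "/path/to/db", descriptors, handles)) { + *   db.put(handles.get(1), "k".getBytes(StandardCharsets.UTF_8), + *       "v".getBytes(StandardCharsets.UTF_8)); + * } + * }</pre>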
+ * + * @param options {@link org.rocksdb.DBOptions} instance. + * @param path the path to the rocksdb. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @see DBOptions#setCreateIfMissing(boolean) + */ + public static RocksDB open(final DBOptions options, final String path, + final List<ColumnFamilyDescriptor> columnFamilyDescriptors, + final List<ColumnFamilyHandle> columnFamilyHandles) + throws RocksDBException { + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); + cfNames[i] = cfDescriptor.getName(); + cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_; + } + + final long[] handles = open(options.nativeHandle_, path, cfNames, + cfOptionHandles); + final RocksDB db = new RocksDB(handles[0]); + db.storeOptionsInstance(options); + + for (int i = 1; i < handles.length; i++) { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(db, handles[i]); + columnFamilyHandles.add(columnFamilyHandle); + } + + db.ownedColumnFamilyHandles.addAll(columnFamilyHandles); + + return db; + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the default + * options. + * + * @param path the path to the RocksDB. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final String path) + throws RocksDBException { + // This allows the RocksJava default Options to be used + // instead of the C++ one. + final Options options = new Options(); + return openReadOnly(options, path); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the specified + * options and db path. + * + * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If the user doesn't dispose the options explicitly, + * then this options instance will be GC'd automatically. + * + * @param options {@link Options} instance. + * @param path the path to the RocksDB. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final Options options, final String path) + throws RocksDBException { + return openReadOnly(options, path, false); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the specified + * options and db path. + * + * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If the user doesn't dispose the options explicitly, + * then this options instance will be GC'd automatically. + * + * @param options {@link Options} instance. + * @param path the path to the RocksDB.
+ * @param errorIfWalFileExists true to raise an error when opening the db + * if a Write Ahead Log file exists, false otherwise. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final Options options, final String path, + final boolean errorIfWalFileExists) throws RocksDBException { + // When a non-default Options is used, keeping a reference to the Options + // in RocksDB prevents Java from GC'ing it during the lifetime of + // the currently-created RocksDB. + final RocksDB db = new RocksDB(openROnly(options.nativeHandle_, path, errorIfWalFileExists)); + db.storeOptionsInstance(options); + return db; + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the default + * options. + * + * @param path the path to the RocksDB. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final String path, + final List<ColumnFamilyDescriptor> columnFamilyDescriptors, + final List<ColumnFamilyHandle> columnFamilyHandles) + throws RocksDBException { + // This allows the RocksJava default Options to be used + // instead of the C++ one. + final DBOptions options = new DBOptions(); + return openReadOnly(options, path, columnFamilyDescriptors, columnFamilyHandles, false); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the specified + * options and db path. + * + * <p>This open method allows opening RocksDB using a subset of the available + * column families.</p> + * <p>Options instance *should* not be disposed before all DBs using this + * options instance have been closed. If the user doesn't dispose the options + * explicitly, then this options instance will be GC'd automatically.</p>
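+ * <p>An illustrative read-only open over a subset of column families; the + * path is an assumed example:</p> + * <pre>{@code + * final List<ColumnFamilyDescriptor> descriptors = Arrays.asList( + *     new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); + * final List<ColumnFamilyHandle> handles = new ArrayList<>(); + * try (final DBOptions options = new DBOptions(); + *      final RocksDB db = RocksDB.openReadOnly(options, "/path/to/db", + *          descriptors, handles)) { + *   final byte[] value = new byte[1024]; + *   final int size = db.get(handles.get(0), + *       "k".getBytes(StandardCharsets.UTF_8), value); + * } + * }</pre>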
+ * + * @param options {@link DBOptions} instance. + * @param path the path to the RocksDB. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final DBOptions options, final String path, + final List<ColumnFamilyDescriptor> columnFamilyDescriptors, + final List<ColumnFamilyHandle> columnFamilyHandles) throws RocksDBException { + return openReadOnly(options, path, columnFamilyDescriptors, columnFamilyHandles, false); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the specified + * options and db path. + * + * <p>This open method allows opening RocksDB using a subset of the available + * column families.</p> + * <p>Options instance *should* not be disposed before all DBs using this + * options instance have been closed. If the user doesn't dispose the options + * explicitly, then this options instance will be GC'd automatically.</p>
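+ * <p>A sketch of the same read-only open, raising an error if a WAL file is + * present; all arguments shown are illustrative:</p> + * <pre>{@code + * final List<ColumnFamilyHandle> handles = new ArrayList<>(); + * try (final DBOptions options = new DBOptions(); + *      final RocksDB db = RocksDB.openReadOnly(options, "/path/to/db", + *          Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)), + *          handles, true)) { + *   // read-only access; open fails if a Write Ahead Log file exists + * } + * }</pre>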
+ * + * @param options {@link DBOptions} instance. + * @param path the path to the RocksDB. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @param errorIfWalFileExists true to raise an error when opening the db + * if a Write Ahead Log file exists, false otherwise. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final DBOptions options, final String path, + final List<ColumnFamilyDescriptor> columnFamilyDescriptors, + final List<ColumnFamilyHandle> columnFamilyHandles, final boolean errorIfWalFileExists) + throws RocksDBException { + // When a non-default Options is used, keeping a reference to the Options + // in RocksDB prevents Java from GC'ing it during the lifetime of + // the currently-created RocksDB. + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); + cfNames[i] = cfDescriptor.getName(); + cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_; + } + + final long[] handles = + openROnly(options.nativeHandle_, path, cfNames, cfOptionHandles, errorIfWalFileExists); + final RocksDB db = new RocksDB(handles[0]); + db.storeOptionsInstance(options); + + for (int i = 1; i < handles.length; i++) { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(db, handles[i]); + columnFamilyHandles.add(columnFamilyHandle); + } + + db.ownedColumnFamilyHandles.addAll(columnFamilyHandles); + + return db; + } + + /** + * Open DB as secondary instance with only the default column family. + * + * The secondary instance can dynamically tail the MANIFEST of + * a primary that must have already been created. The user can call + * {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up + * with the primary (WAL tailing is NOT supported now) whenever the user feels + * necessary. Column families created by the primary after the secondary + * instance starts are currently ignored by the secondary instance. + * Column families opened by the secondary and dropped by the primary will be + * dropped by the secondary as well. However, the user of the secondary instance + * can still access the data of such a dropped column family as long as they + * do not destroy the corresponding column family handle. + * WAL tailing is not supported at present, but will arrive soon. + * + * @param options the options to open the secondary instance. + * @param path the path to the primary RocksDB instance. + * @param secondaryPath points to a directory where the secondary instance + * stores its info log. + * + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openAsSecondary(final Options options, final String path, + final String secondaryPath) throws RocksDBException { + // When a non-default Options is used, keeping a reference to the Options + // in RocksDB prevents Java from GC'ing it during the lifetime of + // the currently-created RocksDB.
+ final RocksDB db = new RocksDB(openAsSecondary(options.nativeHandle_, path, secondaryPath)); + db.storeOptionsInstance(options); + return db; + } + + /** + * Open DB as secondary instance with column families. + * You can open a subset of column families in secondary mode. + * + * The secondary instance can dynamically tail the MANIFEST of + * a primary that must have already been created. The user can call + * {@link #tryCatchUpWithPrimary()} to make the secondary instance catch up + * with the primary (WAL tailing is NOT supported now) whenever the user feels + * necessary. Column families created by the primary after the secondary + * instance starts are currently ignored by the secondary instance. + * Column families opened by the secondary and dropped by the primary will be + * dropped by the secondary as well. However, the user of the secondary instance + * can still access the data of such a dropped column family as long as they + * do not destroy the corresponding column family handle. + * WAL tailing is not supported at present, but will arrive soon. + * + * @param options the options to open the secondary instance. + * @param path the path to the primary RocksDB instance. + * @param secondaryPath points to a directory where the secondary instance + * stores its info log. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openAsSecondary(final DBOptions options, final String path, + final String secondaryPath, final List<ColumnFamilyDescriptor> columnFamilyDescriptors, + final List<ColumnFamilyHandle> columnFamilyHandles) throws RocksDBException { + // When a non-default Options is used, keeping a reference to the Options + // in RocksDB prevents Java from GC'ing it during the lifetime of + // the currently-created RocksDB. + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors.get(i); + cfNames[i] = cfDescriptor.getName(); + cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_; + } + + final long[] handles = + openAsSecondary(options.nativeHandle_, path, secondaryPath, cfNames, cfOptionHandles); + final RocksDB db = new RocksDB(handles[0]); + db.storeOptionsInstance(options); + + for (int i = 1; i < handles.length; i++) { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(db, handles[i]); + columnFamilyHandles.add(columnFamilyHandle); + } + + db.ownedColumnFamilyHandles.addAll(columnFamilyHandles); + + return db; + } + + /** + * This is similar to {@link #close()} except that it + * throws an exception if any error occurs. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + * + * @throws RocksDBException if an error occurs whilst closing.
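+ * <p>For illustration, closing with explicit error propagation; the sync-first + * pattern shown is optional:</p> + * <pre>{@code + * db.syncWal();  // only if WAL durability is required + * db.closeE();   // propagates any close error as RocksDBException + * }</pre>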
+ */ + public void closeE() throws RocksDBException { + for (final ColumnFamilyHandle columnFamilyHandle : ownedColumnFamilyHandles) { + columnFamilyHandle.close(); + } + ownedColumnFamilyHandles.clear(); + + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } finally { + disposeInternal(); + } + } + } + + /** + * This is similar to {@link #closeE()} except that it + * silently ignores any errors. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #closeE()}. + */ + @Override + public void close() { + for (final ColumnFamilyHandle columnFamilyHandle : ownedColumnFamilyHandles) { + columnFamilyHandle.close(); + } + ownedColumnFamilyHandles.clear(); + + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } catch (final RocksDBException e) { + // silently ignore the error report + } finally { + disposeInternal(); + } + } + } + + /** + * Static method to determine all available column families for a + * rocksdb database identified by path. + * + * @param options Options for opening the database + * @param path Absolute path to rocksdb database + * @return List<byte[]> List containing the column family names + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static List<byte[]> listColumnFamilies(final Options options, + final String path) throws RocksDBException { + return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_, + path)); + } + + /** + * Creates a new column family with the name columnFamilyName and + * allocates a ColumnFamilyHandle within an internal structure. + * The ColumnFamilyHandle is automatically disposed with DB disposal. + * + * @param columnFamilyDescriptor column family to be created. + * @return {@link org.rocksdb.ColumnFamilyHandle} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public ColumnFamilyHandle createColumnFamily( + final ColumnFamilyDescriptor columnFamilyDescriptor) + throws RocksDBException { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, + createColumnFamily(nativeHandle_, columnFamilyDescriptor.getName(), + columnFamilyDescriptor.getName().length, + columnFamilyDescriptor.getOptions().nativeHandle_)); + ownedColumnFamilyHandles.add(columnFamilyHandle); + return columnFamilyHandle; + } + + /** + * Bulk create column families with the same column family options. + * + * @param columnFamilyOptions the options for the column families. + * @param columnFamilyNames the names of the column families. + * + * @return the handles to the newly created column families.
+ * + * @throws RocksDBException if an error occurs whilst creating + * the column families + */ + public List<ColumnFamilyHandle> createColumnFamilies( + final ColumnFamilyOptions columnFamilyOptions, + final List<byte[]> columnFamilyNames) throws RocksDBException { + final byte[][] cfNames = columnFamilyNames.toArray( + new byte[0][]); + final long[] cfHandles = createColumnFamilies(nativeHandle_, + columnFamilyOptions.nativeHandle_, cfNames); + final List<ColumnFamilyHandle> columnFamilyHandles = + new ArrayList<>(cfHandles.length); + for (int i = 0; i < cfHandles.length; i++) { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandles[i]); + columnFamilyHandles.add(columnFamilyHandle); + } + ownedColumnFamilyHandles.addAll(columnFamilyHandles); + return columnFamilyHandles; + } + + /** + * Bulk create column families, each with its own column family options. + * + * @param columnFamilyDescriptors the descriptions of the column families. + * + * @return the handles to the newly created column families. + * + * @throws RocksDBException if an error occurs whilst creating + * the column families + */ + public List<ColumnFamilyHandle> createColumnFamilies( + final List<ColumnFamilyDescriptor> columnFamilyDescriptors) + throws RocksDBException { + final long[] cfOptsHandles = new long[columnFamilyDescriptors.size()]; + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor columnFamilyDescriptor + = columnFamilyDescriptors.get(i); + cfOptsHandles[i] = columnFamilyDescriptor.getOptions().nativeHandle_; + cfNames[i] = columnFamilyDescriptor.getName(); + } + final long[] cfHandles = createColumnFamilies(nativeHandle_, + cfOptsHandles, cfNames); + final List<ColumnFamilyHandle> columnFamilyHandles = + new ArrayList<>(cfHandles.length); + for (int i = 0; i < cfHandles.length; i++) { + final ColumnFamilyHandle columnFamilyHandle = new ColumnFamilyHandle(this, cfHandles[i]); + columnFamilyHandles.add(columnFamilyHandle); + } + ownedColumnFamilyHandles.addAll(columnFamilyHandles); + return columnFamilyHandles; + } + + /** + * Drops the column family specified by {@code columnFamilyHandle}. This call + * only records a drop record in the manifest and prevents the column + * family from flushing and compacting. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void dropColumnFamily(final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + dropColumnFamily(nativeHandle_, columnFamilyHandle.nativeHandle_); + } + + // Bulk drop column families. This call only records drop records in the + // manifest and prevents the column families from flushing and compacting. + // In case of error, the request may succeed partially. User may call + // ListColumnFamilies to check the result. + public void dropColumnFamilies( + final List<ColumnFamilyHandle> columnFamilies) throws RocksDBException { + final long[] cfHandles = new long[columnFamilies.size()]; + for (int i = 0; i < columnFamilies.size(); i++) { + cfHandles[i] = columnFamilies.get(i).nativeHandle_; + } + dropColumnFamilies(nativeHandle_, cfHandles); + } + + /** + * Deletes the native column family handle of the given {@link ColumnFamilyHandle} Java object + * and removes the reference from {@link RocksDB#ownedColumnFamilyHandles}. + * + * @param columnFamilyHandle column family handle object.
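+ * <p>A sketch of creating a column family and later destroying its handle; + * the name is an assumed example:</p> + * <pre>{@code + * final ColumnFamilyHandle cf = db.createColumnFamily( + *     new ColumnFamilyDescriptor("new-cf".getBytes(StandardCharsets.UTF_8))); + * // ... use cf ... + * db.destroyColumnFamilyHandle(cf); + * }</pre>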
+ */ + public void destroyColumnFamilyHandle(final ColumnFamilyHandle columnFamilyHandle) { + for (int i = 0; i < ownedColumnFamilyHandles.size(); ++i) { + final ColumnFamilyHandle ownedHandle = ownedColumnFamilyHandles.get(i); + if (ownedHandle.equals(columnFamilyHandle)) { + columnFamilyHandle.close(); + ownedColumnFamilyHandles.remove(i); + return; + } + } + } + + /** + * Set the database entry for "key" to "value". + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final byte[] key, final byte[] value) + throws RocksDBException { + put(nativeHandle_, key, 0, key.length, value, 0, value.length); + } + + /** + * Set the database entry for "key" to "value". + * + * @param key The specified key to be inserted + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value associated with the specified key + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void put(final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + put(nativeHandle_, key, offset, len, value, vOffset, vLen); + } + + /** + * Set the database entry for "key" to "value" in the specified + * column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws IllegalArgumentException if column family is not present + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final byte[] value) throws RocksDBException { + put(nativeHandle_, key, 0, key.length, value, 0, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Set the database entry for "key" to "value" in the specified + * column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key The specified key to be inserted + * @param offset the offset of the "key" array to be used, must + * be non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value associated with the specified key + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying native + * library.
+ * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + put(nativeHandle_, key, offset, len, value, vOffset, vLen, + columnFamilyHandle.nativeHandle_); + } + + /** + * Set the database entry for "key" to "value". + * + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + put(nativeHandle_, writeOpts.nativeHandle_, + key, 0, key.length, value, 0, value.length); + } + + /** + * Set the database entry for "key" to "value". + * + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key The specified key to be inserted + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value associated with the specified key + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void put(final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + put(nativeHandle_, writeOpts.nativeHandle_, + key, offset, len, value, vOffset, vLen); + } + + /** + * Set the database entry for "key" to "value" for the specified + * column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws IllegalArgumentException if column family is not present + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @see IllegalArgumentException + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + put(nativeHandle_, writeOpts.nativeHandle_, key, 0, key.length, value, + 0, value.length, columnFamilyHandle.nativeHandle_); + } + + /** + * Set the database entry for "key" to "value" for the specified + * column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key the specified key to be inserted. Position and limit are used. + * Supports direct buffer only. + * @param value the value associated with the specified key.
+ * Position and limit are used. + * Supports direct buffer only. + * + * @throws IllegalArgumentException if column family is not present + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @see IllegalArgumentException + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts, + final ByteBuffer key, final ByteBuffer value) throws RocksDBException { + assert key.isDirect() && value.isDirect(); + putDirect(nativeHandle_, writeOpts.nativeHandle_, key, key.position(), key.remaining(), value, + value.position(), value.remaining(), columnFamilyHandle.nativeHandle_); + key.position(key.limit()); + value.position(value.limit()); + } + + /** + * Set the database entry for "key" to "value". + * + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key the specified key to be inserted. Position and limit are used. + * Supports direct buffer only. + * @param value the value associated with the specified key. Position and limit are used. + * Supports direct buffer only. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final WriteOptions writeOpts, final ByteBuffer key, final ByteBuffer value) + throws RocksDBException { + assert key.isDirect() && value.isDirect(); + putDirect(nativeHandle_, writeOpts.nativeHandle_, key, key.position(), key.remaining(), value, + value.position(), value.remaining(), 0); + key.position(key.limit()); + value.position(value.limit()); + } + + /** + * Set the database entry for "key" to "value" for the specified + * column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key The specified key to be inserted + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value associated with the specified key + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + put(nativeHandle_, writeOpts.nativeHandle_, key, offset, len, value, + vOffset, vLen, columnFamilyHandle.nativeHandle_); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
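+ * <p>Illustrative put-then-delete round trip; the key and value bytes are + * assumed examples:</p> + * <pre>{@code + * final byte[] key = "k".getBytes(StandardCharsets.UTF_8); + * db.put(key, "v".getBytes(StandardCharsets.UTF_8)); + * db.delete(key); + * }</pre>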
+ */ + public void delete(final byte[] key) throws RocksDBException { + delete(nativeHandle_, key, 0, key.length); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final byte[] key, final int offset, final int len) + throws RocksDBException { + delete(nativeHandle_, key, offset, len); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + delete(nativeHandle_, key, 0, key.length, columnFamilyHandle.nativeHandle_); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, + * must be non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len) + throws RocksDBException { + delete(nativeHandle_, key, offset, len, columnFamilyHandle.nativeHandle_); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final WriteOptions writeOpt, final byte[] key) + throws RocksDBException { + delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
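+ * <p>A sketch of a synchronous delete; the WriteOptions settings shown are + * illustrative:</p> + * <pre>{@code + * try (final WriteOptions writeOpts = new WriteOptions().setSync(true)) { + *   db.delete(writeOpts, "k".getBytes(StandardCharsets.UTF_8)); + * } + * }</pre>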
+ */ + public void delete(final WriteOptions writeOpt, final byte[] key, + final int offset, final int len) throws RocksDBException { + delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key) + throws RocksDBException { + delete(nativeHandle_, writeOpt.nativeHandle_, key, 0, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key, final int offset, + final int len) throws RocksDBException { + delete(nativeHandle_, writeOpt.nativeHandle_, key, offset, len, + columnFamilyHandle.nativeHandle_); + } + + /** + * Get the value associated with the specified key. + * + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. Position and limit are used. + * Supports direct buffer only. + * @param value the out-value to receive the retrieved value. + * Position and limit are used. The limit is set according to the value size. + * Supports direct buffer only. + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ReadOptions opt, final ByteBuffer key, final ByteBuffer value) + throws RocksDBException { + assert key.isDirect() && value.isDirect(); + int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(), + value, value.position(), value.remaining(), 0); + if (result != NOT_FOUND) { + value.limit(Math.min(value.limit(), value.position() + result)); + } + key.position(key.limit()); + return result; + } + + /** + * Get the value associated with the specified key within column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. Position and limit are used. + * Supports direct buffer only. + * @param value the out-value to receive the retrieved value. + * Position and limit are used. The limit is set according to the value size. + * Supports direct buffer only. + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ColumnFamilyHandle columnFamilyHandle, final ReadOptions opt, + final ByteBuffer key, final ByteBuffer value) throws RocksDBException { + assert key.isDirect() && value.isDirect(); + int result = getDirect(nativeHandle_, opt.nativeHandle_, key, key.position(), key.remaining(), + value, value.position(), value.remaining(), columnFamilyHandle.nativeHandle_); + if (result != NOT_FOUND) { + value.limit(Math.min(value.limit(), value.position() + result)); + } + key.position(key.limit()); + return result; + } + + /** + * Remove the database entry for {@code key}. Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. + * + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final byte[] key) throws RocksDBException { + singleDelete(nativeHandle_, key, key.length); + } + + /** + * Remove the database entry for {@code key}. Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. + * + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * @param columnFamilyHandle The column family to delete the key from + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
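+ * <p>A hedged sketch of SingleDelete; it is only correct under the + * single-Put constraint described above:</p> + * <pre>{@code + * final byte[] key = "once".getBytes(StandardCharsets.UTF_8); + * db.put(key, "v".getBytes(StandardCharsets.UTF_8));  // exactly one Put + * db.singleDelete(key);                               // then SingleDelete + * }</pre>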
+ */ + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + singleDelete(nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Remove the database entry for {@code key}. Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. + * + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * Note: consider setting {@link WriteOptions#setSync(boolean)} true. + * + * @param writeOpt Write options for the delete + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final WriteOptions writeOpt, final byte[] key) + throws RocksDBException { + singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length); + } + + /** + * Remove the database entry for {@code key}. Requires that the key exists + * and was not overwritten. It is not an error if the key did not exist + * in the database. + * + * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple + * times), then the result of calling SingleDelete() on this key is undefined. + * SingleDelete() only behaves correctly if there has been only one Put() + * for this key since the previous call to SingleDelete() for this key. + * + * This feature is currently an experimental performance optimization + * for a very specific workload. It is up to the caller to ensure that + * SingleDelete is only used for a key that is not deleted using Delete() or + * written using Merge(). Mixing SingleDelete operations with Deletes and + * Merges can result in undefined behavior. + * + * Note: consider setting {@link WriteOptions#setSync(boolean)} true. + * + * @param columnFamilyHandle The column family to delete the key from + * @param writeOpt Write options for the delete + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + @Experimental("Performance optimization for a very specific workload") + public void singleDelete(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key) throws RocksDBException { + singleDelete(nativeHandle_, writeOpt.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + } + + + /** + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". a non-OK status on error. It + * is not an error if no keys exist in the range ["beginKey", "endKey"). + * + * Delete the database entry (if any) for "key". Returns OK on success, and a + * non-OK status on error. It is not an error if "key" did not exist in the + * database. 
+ * + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public void deleteRange(final byte[] beginKey, final byte[] endKey) + throws RocksDBException { + deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, + endKey.length); + } + + /** + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". Returns OK on success, and + * a non-OK status on error. It is not an error if no keys exist in the + * range ["beginKey", "endKey"). + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, + final byte[] beginKey, final byte[] endKey) throws RocksDBException { + deleteRange(nativeHandle_, beginKey, 0, beginKey.length, endKey, 0, + endKey.length, columnFamilyHandle.nativeHandle_); + } + + /** + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". Returns OK on success, and + * a non-OK status on error. It is not an error if no keys exist in the + * range ["beginKey", "endKey"). + * + * @param writeOpt WriteOptions to be used with delete operation + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void deleteRange(final WriteOptions writeOpt, final byte[] beginKey, + final byte[] endKey) throws RocksDBException { + deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, + beginKey.length, endKey, 0, endKey.length); + } + + /** + * Removes the database entries in the range ["beginKey", "endKey"), i.e., + * including "beginKey" and excluding "endKey". Returns OK on success, and + * a non-OK status on error. It is not an error if no keys exist in the + * range ["beginKey", "endKey"). + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance + * @param writeOpt WriteOptions to be used with delete operation + * @param beginKey First key to delete within database (inclusive) + * @param endKey Last key to delete within database (exclusive) + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public void deleteRange(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] beginKey, final byte[] endKey) + throws RocksDBException { + deleteRange(nativeHandle_, writeOpt.nativeHandle_, beginKey, 0, + beginKey.length, endKey, 0, endKey.length, + columnFamilyHandle.nativeHandle_); + } + + + /** + * Add merge operand for key/value pair.
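+ * <p>A sketch of merge; it assumes the DB was opened with a merge operator + * configured, e.g. {@code new Options().setMergeOperator(new StringAppendOperator())}; + * the keys and values are illustrative:</p> + * <pre>{@code + * final byte[] key = "list".getBytes(StandardCharsets.UTF_8); + * db.merge(key, "a".getBytes(StandardCharsets.UTF_8)); + * db.merge(key, "b".getBytes(StandardCharsets.UTF_8));  // value becomes "a,b" + * }</pre>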
+ * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for the + * specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final byte[] key, final byte[] value) + throws RocksDBException { + merge(nativeHandle_, key, 0, key.length, value, 0, value.length); + } + + /** + * Add merge operand for key/value pair. + * + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for the + * specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge(final byte[] key, int offset, int len, final byte[] value, + final int vOffset, final int vLen) throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + merge(nativeHandle_, key, offset, len, value, vOffset, vLen); + } + + /** + * Add merge operand for key/value pair in a ColumnFamily. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final byte[] value) throws RocksDBException { + merge(nativeHandle_, key, 0, key.length, value, 0, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Add merge operand for key/value pair in a ColumnFamily. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for + * the specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len, final byte[] value, + final int vOffset, final int vLen) throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + merge(nativeHandle_, key, offset, len, value, vOffset, vLen, + columnFamilyHandle.nativeHandle_); + } + + /** + * Add merge operand for key/value pair. + * + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + merge(nativeHandle_, writeOpts.nativeHandle_, + key, 0, key.length, value, 0, value.length); + } + + /** + * Add merge operand for key/value pair. + * + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for + * the specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge(final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + merge(nativeHandle_, writeOpts.nativeHandle_, + key, offset, len, value, vOffset, vLen); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database. Position and limit are used. + * Supports direct buffer only. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final WriteOptions writeOpt, final ByteBuffer key) throws RocksDBException { + assert key.isDirect(); + deleteDirect(nativeHandle_, writeOpt.nativeHandle_, key, key.position(), key.remaining(), 0); + key.position(key.limit()); + } + + /** + * Delete the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database. Position and limit are used. + * Supports direct buffer only. + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
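+ * <p>An illustrative direct-buffer delete, assuming a previously obtained + * {@code columnFamilyHandle}; the key bytes are an assumed example:</p> + * <pre>{@code + * final ByteBuffer key = ByteBuffer.allocateDirect(16); + * key.put("k".getBytes(StandardCharsets.UTF_8)); + * key.flip(); + * try (final WriteOptions writeOpts = new WriteOptions()) { + *   db.delete(columnFamilyHandle, writeOpts, key); + * } + * }</pre>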
+ */ + public void delete(final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpt, + final ByteBuffer key) throws RocksDBException { + assert key.isDirect(); + deleteDirect(nativeHandle_, writeOpt.nativeHandle_, key, key.position(), key.remaining(), + columnFamilyHandle.nativeHandle_); + key.position(key.limit()); + } + + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for the + * specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpts, final byte[] key, final byte[] value) + throws RocksDBException { + merge(nativeHandle_, writeOpts.nativeHandle_, + key, 0, key.length, value, 0, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the value to be merged with the current value for + * the specified key. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IndexOutOfBoundsException if an offset or length is out of bounds + */ + public void merge( + final ColumnFamilyHandle columnFamilyHandle, final WriteOptions writeOpts, + final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + merge(nativeHandle_, writeOpts.nativeHandle_, + key, offset, len, value, vOffset, vLen, + columnFamilyHandle.nativeHandle_); + } + + /** + * Apply the specified updates to the database. + * + * @param writeOpts WriteOptions instance + * @param updates WriteBatch instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void write(final WriteOptions writeOpts, final WriteBatch updates) + throws RocksDBException { + write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + } + + /** + * Apply the specified updates to the database. + * + * @param writeOpts WriteOptions instance + * @param updates WriteBatchWithIndex instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
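+ * <p>A sketch of an atomic batched write; the keys and values are + * illustrative:</p> + * <pre>{@code + * try (final WriteBatch batch = new WriteBatch(); + *      final WriteOptions writeOpts = new WriteOptions()) { + *   batch.put("k1".getBytes(StandardCharsets.UTF_8), + *       "v1".getBytes(StandardCharsets.UTF_8)); + *   batch.delete("k2".getBytes(StandardCharsets.UTF_8)); + *   db.write(writeOpts, batch);  // applied atomically + * } + * }</pre>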
+ */ + public void write(final WriteOptions writeOpts, + final WriteBatchWithIndex updates) throws RocksDBException { + write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + } + + // TODO(AR) we should improve the #get() API, returning -1 (RocksDB.NOT_FOUND) is not very nice + // when we could communicate a better status instead; the C++ code also shows that -2 could be returned + + /** + * Get the value associated with the specified key within the default + * column family. + * + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final byte[] key, final byte[] value) throws RocksDBException { + return get(nativeHandle_, key, 0, key.length, value, 0, value.length); + } + + /** + * Get the value associated with the specified key within the default + * column family. + * + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the out-value to receive the retrieved value. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + return get(nativeHandle_, key, offset, len, value, vOffset, vLen); + } + + /** + * Get the value associated with the specified key within column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
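+ * <p>Editor's illustrative sketch (not upstream documentation) of how the
+ * return value is commonly interpreted; {@code db} and {@code cfh} are
+ * assumed:
+ * <pre>{@code
+ * final byte[] buf = new byte[32];
+ * final int size = db.get(cfh, "user:42".getBytes(StandardCharsets.UTF_8), buf);
+ * if (size == RocksDB.NOT_FOUND) {
+ *   // no value for this key
+ * } else if (size > buf.length) {
+ *   // buf holds only a prefix; retry with a buffer of at least `size` bytes
+ * }
+ * }</pre>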
+ */ + public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final byte[] value) throws RocksDBException, IllegalArgumentException { + return get(nativeHandle_, key, 0, key.length, value, 0, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Get the value associated with the specified key within column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the out-value to receive the retrieved value. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final int offset, final int len, final byte[] value, final int vOffset, + final int vLen) throws RocksDBException, IllegalArgumentException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + return get(nativeHandle_, key, offset, len, value, vOffset, vLen, + columnFamilyHandle.nativeHandle_); + } + + /** + * Get the value associated with the specified key. + * + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ReadOptions opt, final byte[] key, + final byte[] value) throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, + key, 0, key.length, value, 0, value.length); + } + + /** + * Get the value associated with the specified key. + * + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param value the out-value to receive the retrieved value.
+ * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ReadOptions opt, final byte[] key, final int offset, + final int len, final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + return get(nativeHandle_, opt.nativeHandle_, + key, offset, len, value, vOffset, vLen); + } + + /** + * Get the value associated with the specified key within column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions opt, final byte[] key, final byte[] value) + throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length, value, + 0, value.length, columnFamilyHandle.nativeHandle_); + } + + /** + * Get the value associated with the specified key within column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be + * non-negative and no larger than ("key".length - offset) + * @param value the out-value to receive the retrieved value. + * @param vOffset the offset of the "value" array to be used, must be + * non-negative and no larger than "value".length + * @param vLen the length of the "value" array to be used, must be + * non-negative and no larger than ("value".length - vOffset) + * @return The size of the actual value that matches the specified + * {@code key} in bytes. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
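+ * <p>Editor's illustrative sketch (not upstream documentation): reading a
+ * key that sits at an offset inside a larger array, into a reusable value
+ * buffer; {@code db} and {@code cfh} are assumed:
+ * <pre>{@code
+ * final byte[] packed = "prefix-user:42".getBytes(StandardCharsets.UTF_8);
+ * final byte[] valBuf = new byte[4096];
+ * try (final ReadOptions readOpts = new ReadOptions()) {
+ *   // the key is the 7 bytes of "user:42" starting at index 7
+ *   final int size = db.get(cfh, readOpts, packed, 7, 7, valBuf, 0, valBuf.length);
+ * }
+ * }</pre>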
+ */ + public int get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions opt, final byte[] key, final int offset, final int len, + final byte[] value, final int vOffset, final int vLen) + throws RocksDBException { + checkBounds(offset, len, key.length); + checkBounds(vOffset, vLen, value.length); + return get(nativeHandle_, opt.nativeHandle_, key, offset, len, value, + vOffset, vLen, columnFamilyHandle.nativeHandle_); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param key the key to retrieve the value. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final byte[] key) throws RocksDBException { + return get(nativeHandle_, key, 0, key.length); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final byte[] key, final int offset, + final int len) throws RocksDBException { + checkBounds(offset, len, key.length); + return get(nativeHandle_, key, offset, len); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + return get(nativeHandle_, key, 0, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len) + throws RocksDBException { + checkBounds(offset, len, key.length); + return get(nativeHandle_, key, offset, len, + columnFamilyHandle.nativeHandle_); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param key the key to retrieve the value. + * @param opt Read options. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ReadOptions opt, final byte[] key) + throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param opt Read options. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ReadOptions opt, final byte[] key, final int offset, + final int len) throws RocksDBException { + checkBounds(offset, len, key.length); + return get(nativeHandle_, opt.nativeHandle_, key, offset, len); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @param opt Read options. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions opt, final byte[] key) throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, key, 0, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param opt Read options. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key.
+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions opt, final byte[] key, final int offset, final int len) + throws RocksDBException { + checkBounds(offset, len, key.length); + return get(nativeHandle_, opt.nativeHandle_, key, offset, len, + columnFamilyHandle.nativeHandle_); + } + + /** + * Takes a list of keys, and returns a list of values for the given list of + * keys. The list will contain null for keys which could not be found. + * + * @param keys List of keys for which values need to be retrieved. + * @return List of values for the given list of keys. The list will contain + * null for keys which could not be found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<byte[]> multiGetAsList(final List<byte[]> keys) + throws RocksDBException { + assert(keys.size() != 0); + + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final int[] keyOffsets = new int[keysArray.length]; + final int[] keyLengths = new int[keysArray.length]; + for(int i = 0; i < keyLengths.length; i++) { + keyLengths[i] = keysArray[i].length; + } + + return Arrays.asList(multiGet(nativeHandle_, keysArray, keyOffsets, + keyLengths)); + } + + /** + * Returns a list of values for the given list of keys. The list will contain + * null for keys which could not be found. + * <p> + * Note: Every key needs to have a related column family name in + * {@code columnFamilyHandleList}. + * </p> + * + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys List of keys for which values need to be retrieved. + * @return List of values for the given list of keys. The list will contain + * null for keys which could not be found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IllegalArgumentException thrown if the size of passed keys is not + * equal to the amount of passed column family handles. + */ + public List<byte[]> multiGetAsList( + final List<ColumnFamilyHandle> columnFamilyHandleList, + final List<byte[]> keys) throws RocksDBException, + IllegalArgumentException { + assert(keys.size() != 0); + // Check that the number of keys matches the number of column family + // handles; if not, an exception is thrown to avoid a segmentation + // fault in the native code. + if (keys.size() != columnFamilyHandleList.size()) { + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + final long[] cfHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final int[] keyOffsets = new int[keysArray.length]; + final int[] keyLengths = new int[keysArray.length]; + for(int i = 0; i < keyLengths.length; i++) { + keyLengths[i] = keysArray[i].length; + } + + return Arrays.asList(multiGet(nativeHandle_, keysArray, keyOffsets, + keyLengths, cfHandles)); + } + + /** + * Returns a list of values for the given list of keys. The list will contain + * null for keys which could not be found. + * + * @param opt Read options. + * @param keys list of keys for which values need to be retrieved. + * @return List of values for the given list of keys. The list will contain + * null for keys which could not be found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<byte[]> multiGetAsList(final ReadOptions opt, + final List<byte[]> keys) throws RocksDBException { + assert(keys.size() != 0); + + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final int[] keyOffsets = new int[keysArray.length]; + final int[] keyLengths = new int[keysArray.length]; + for(int i = 0; i < keyLengths.length; i++) { + keyLengths[i] = keysArray[i].length; + } + + return Arrays.asList(multiGet(nativeHandle_, opt.nativeHandle_, + keysArray, keyOffsets, keyLengths)); + } + + /** + * Returns a list of values for the given list of keys. The list will contain + * null for keys which could not be found. + * <p>
+ * Note: Every key needs to have a related column family name in + * {@code columnFamilyHandleList}. + * </p>
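+ * <p>Editor's illustrative sketch (not upstream documentation); {@code db}
+ * and {@code cfh} are assumed:
+ * <pre>{@code
+ * final List<byte[]> keys = Arrays.asList(
+ *     "k1".getBytes(StandardCharsets.UTF_8),
+ *     "k2".getBytes(StandardCharsets.UTF_8));
+ * final List<ColumnFamilyHandle> cfs = Arrays.asList(cfh, cfh); // one per key
+ * try (final ReadOptions readOpts = new ReadOptions()) {
+ *   final List<byte[]> vals = db.multiGetAsList(readOpts, cfs, keys);
+ *   // vals.get(i) is null where keys.get(i) was not found
+ * }
+ * }</pre>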
+ * + * @param opt Read options. + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys list of keys for which values need to be retrieved. + * @return List of values for the given list of keys. The list will contain + * null for keys which could not be found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IllegalArgumentException thrown if the size of passed keys is not + * equal to the amount of passed column family handles. + */ + public List<byte[]> multiGetAsList(final ReadOptions opt, + final List<ColumnFamilyHandle> columnFamilyHandleList, + final List<byte[]> keys) throws RocksDBException { + assert(keys.size() != 0); + // Check that the number of keys matches the number of column family + // handles; if not, an exception is thrown to avoid a segmentation + // fault in the native code. + if (keys.size() != columnFamilyHandleList.size()) { + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + final long[] cfHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final int[] keyOffsets = new int[keysArray.length]; + final int[] keyLengths = new int[keysArray.length]; + for(int i = 0; i < keyLengths.length; i++) { + keyLengths[i] = keysArray[i].length; + } + + return Arrays.asList(multiGet(nativeHandle_, opt.nativeHandle_, + keysArray, keyOffsets, keyLengths, cfHandles)); + } + + /** + * Fetches a list of values for the given list of keys, all from the default column family. + * + * @param keys list of keys for which values need to be retrieved. + * @param values list of buffers to return retrieved values in + * @return list of number of bytes in DB for each requested key; + * this can be more than the size of the corresponding buffer, in which + * case the buffer will be filled with the appropriate truncation of the + * database value. + * @throws RocksDBException if error happens in underlying native library. + * @throws IllegalArgumentException thrown if the number of passed keys and passed values + * do not match. + */ + public List<ByteBufferGetStatus> multiGetByteBuffers( + final List<ByteBuffer> keys, final List<ByteBuffer> values) throws RocksDBException { + final ReadOptions readOptions = new ReadOptions(); + final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(1); + columnFamilyHandleList.add(getDefaultColumnFamily()); + return multiGetByteBuffers(readOptions, columnFamilyHandleList, keys, values); + } + + /** + * Fetches a list of values for the given list of keys, all from the default column family. + * + * @param readOptions Read options + * @param keys list of keys for which values need to be retrieved. + * @param values list of buffers to return retrieved values in + * @throws RocksDBException if error happens in underlying native library. + * @throws IllegalArgumentException thrown if the number of passed keys and passed values + * do not match. + * @return the list of values for the given list of keys + */ + public List<ByteBufferGetStatus> multiGetByteBuffers(final ReadOptions readOptions, + final List<ByteBuffer> keys, final List<ByteBuffer> values) throws RocksDBException { + final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(1); + columnFamilyHandleList.add(getDefaultColumnFamily()); + return multiGetByteBuffers(readOptions, columnFamilyHandleList, keys, values); + } + + /** + * Fetches a list of values for the given list of keys. + * <p> + * Note: Every key needs to have a related column family name in + * {@code columnFamilyHandleList}. + * </p> + * + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys list of keys for which values need to be retrieved. + * @param values list of buffers to return retrieved values in + * @throws RocksDBException if error happens in underlying native library. + * @throws IllegalArgumentException thrown if the number of passed keys, passed values and + * passed column family handles do not match. + * @return the list of values for the given list of keys + */ + public List<ByteBufferGetStatus> multiGetByteBuffers( + final List<ColumnFamilyHandle> columnFamilyHandleList, final List<ByteBuffer> keys, + final List<ByteBuffer> values) throws RocksDBException { + final ReadOptions readOptions = new ReadOptions(); + return multiGetByteBuffers(readOptions, columnFamilyHandleList, keys, values); + } + + /** + * Fetches a list of values for the given list of keys. + * <p>
+ * Note: Every key needs to have a related column family name in + * {@code columnFamilyHandleList}. + * </p> + * + * @param readOptions Read options + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys list of keys for which values need to be retrieved. + * @param values list of buffers to return retrieved values in + * @throws RocksDBException if error happens in underlying native library. + * @throws IllegalArgumentException thrown if the number of passed keys, passed values and + * passed column family handles do not match. + * @return the list of values for the given list of keys + */ + public List<ByteBufferGetStatus> multiGetByteBuffers(final ReadOptions readOptions, + final List<ColumnFamilyHandle> columnFamilyHandleList, final List<ByteBuffer> keys, + final List<ByteBuffer> values) throws RocksDBException { + assert (keys.size() != 0); + + // Check that the number of column family handles is 0, 1, or equal to + // the number of keys; otherwise the native code would hit a + // segmentation fault. + if (keys.size() != columnFamilyHandleList.size() && columnFamilyHandleList.size() > 1) { + throw new IllegalArgumentException( + "Wrong number of ColumnFamilyHandle(s) supplied. Provide 0, 1, or as many as there are key/value(s)"); + } + + // Check that there is a value buffer for each key. + if (values.size() != keys.size()) { + throw new IllegalArgumentException("For each key there must be a corresponding value."); + } + + // TODO (AP) support indirect buffers + for (final ByteBuffer key : keys) { + if (!key.isDirect()) { + throw new IllegalArgumentException("All key buffers must be direct byte buffers"); + } + } + + // TODO (AP) support indirect buffers, though probably via a less efficient code path + for (final ByteBuffer value : values) { + if (!value.isDirect()) { + throw new IllegalArgumentException("All value buffers must be direct byte buffers"); + } + } + + final int numCFHandles = columnFamilyHandleList.size(); + final long[] cfHandles = new long[numCFHandles]; + for (int i = 0; i < numCFHandles; i++) { + cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + + final int numValues = keys.size(); + + final ByteBuffer[] keysArray = keys.toArray(new ByteBuffer[0]); + final int[] keyOffsets = new int[numValues]; + final int[] keyLengths = new int[numValues]; + for (int i = 0; i < numValues; i++) { + // TODO (AP) add keysArray[i].arrayOffset() if the buffer is indirect + // TODO (AP) because in that case we have to pass the array directly, + // so that the JNI C++ code will not know to compensate for the array offset + keyOffsets[i] = keysArray[i].position(); + keyLengths[i] = keysArray[i].limit(); + } + final ByteBuffer[] valuesArray = values.toArray(new ByteBuffer[0]); + final int[] valuesSizeArray = new int[numValues]; + final Status[] statusArray = new Status[numValues]; + + multiGet(nativeHandle_, readOptions.nativeHandle_, cfHandles, keysArray, keyOffsets, keyLengths, + valuesArray, valuesSizeArray, statusArray); + + final List<ByteBufferGetStatus> results = new ArrayList<>(); + for (int i = 0; i < numValues; i++) { + final Status status = statusArray[i]; + if (status.getCode() == Status.Code.Ok) { + final ByteBuffer value = valuesArray[i]; + value.position(Math.min(valuesSizeArray[i], value.capacity())); + value.flip(); // prepare for read out + results.add(new ByteBufferGetStatus(status, valuesSizeArray[i], value)); + } else { + results.add(new ByteBufferGetStatus(status)); + } + } + + return results; + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key
might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(byte[])}. One way to make this lighter weight is to avoid + * doing any IOs. + * + * @param key byte array of a key to search for + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist(final byte[] key, + /* @Nullable */ final Holder<byte[]> valueHolder) { + return keyMayExist(key, 0, key.length, valueHolder); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(byte[], int, int)}. One way to make this lighter weight is to + * avoid doing any IOs. + * + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist(final byte[] key, + final int offset, final int len, + /* @Nullable */ final Holder<byte[]> valueHolder) { + return keyMayExist((ColumnFamilyHandle)null, key, offset, len, valueHolder); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(ColumnFamilyHandle,byte[])}. One way to make this lighter + * weight is to avoid doing any IOs. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true.
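+ * <p>Editor's illustrative sketch (not upstream documentation); {@code db}
+ * and {@code cfh} are assumed:
+ * <pre>{@code
+ * final Holder<byte[]> holder = new Holder<>();
+ * if (db.keyMayExist(cfh, "user:42".getBytes(StandardCharsets.UTF_8), holder)) {
+ *   if (holder.getValue() != null) {
+ *     // value was found in memory and returned in the holder
+ *   } else {
+ *     // key may exist; confirm with a full get()
+ *   }
+ * }
+ * }</pre>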
+ */ + public boolean keyMayExist( + final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + /* @Nullable */ final Holder<byte[]> valueHolder) { + return keyMayExist(columnFamilyHandle, key, 0, key.length, + valueHolder); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(ColumnFamilyHandle, byte[], int, int)}. One way to make this + * lighter weight is to avoid doing any IOs. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist( + final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final int offset, final int len, + /* @Nullable */ final Holder<byte[]> valueHolder) { + return keyMayExist(columnFamilyHandle, null, key, offset, len, + valueHolder); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(ReadOptions, byte[])}. One way to make this + * lighter weight is to avoid doing any IOs. + * + * @param readOptions {@link ReadOptions} instance + * @param key byte array of a key to search for + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist( + final ReadOptions readOptions, final byte[] key, + /* @Nullable */ final Holder<byte[]> valueHolder) { + return keyMayExist(readOptions, key, 0, key.length, + valueHolder); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(ReadOptions, byte[], int, int)}. One way to make this + * lighter weight is to avoid doing any IOs.
+ * + * @param readOptions {@link ReadOptions} instance + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist( + final ReadOptions readOptions, + final byte[] key, final int offset, final int len, + /* @Nullable */ final Holder<byte[]> valueHolder) { + return keyMayExist(null, readOptions, + key, offset, len, valueHolder); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(ColumnFamilyHandle, ReadOptions, byte[])}. One way to make this + * lighter weight is to avoid doing any IOs. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param readOptions {@link ReadOptions} instance + * @param key byte array of a key to search for + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist( + final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions, final byte[] key, + /* @Nullable */ final Holder<byte[]> valueHolder) { + return keyMayExist(columnFamilyHandle, readOptions, + key, 0, key.length, valueHolder); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * If the caller wants to obtain the value when the key + * is found in memory, then {@code valueHolder} must be set. + * + * This check is potentially lighter-weight than invoking + * {@link #get(ColumnFamilyHandle, ReadOptions, byte[], int, int)}. + * One way to make this lighter weight is to avoid doing any IOs. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param readOptions {@link ReadOptions} instance + * @param key byte array of a key to search for + * @param offset the offset of the "key" array to be used, must be + * non-negative and no larger than "key".length + * @param len the length of the "key" array to be used, must be non-negative + * and no larger than ("key".length - offset) + * @param valueHolder non-null to retrieve the value if it is found, or null + * if the value is not needed. If non-null, upon return of the function, + * the {@code value} will be set if it could be retrieved. + * + * @return false if the key definitely does not exist in the database, + * otherwise true.
+ */ + public boolean keyMayExist( + final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions, + final byte[] key, final int offset, final int len, + /* @Nullable */ final Holder<byte[]> valueHolder) { + checkBounds(offset, len, key.length); + if (valueHolder == null) { + return keyMayExist(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + readOptions == null ? 0 : readOptions.nativeHandle_, + key, offset, len); + } else { + final byte[][] result = keyMayExistFoundValue( + nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + readOptions == null ? 0 : readOptions.nativeHandle_, + key, offset, len); + if (result[0][0] == 0x0) { + valueHolder.setValue(null); + return false; + } else if (result[0][0] == 0x1) { + valueHolder.setValue(null); + return true; + } else { + valueHolder.setValue(result[1]); + return true; + } + } + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * @param key ByteBuffer containing the key + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist(final ByteBuffer key) { + return keyMayExist(null, (ReadOptions) null, key); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * @param columnFamilyHandle the {@link ColumnFamilyHandle} to look for the key in + * @param key ByteBuffer containing the key + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key) { + return keyMayExist(columnFamilyHandle, (ReadOptions) null, key); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * @param readOptions the {@link ReadOptions} to use when reading the key/value + * @param key ByteBuffer containing the key + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist(final ReadOptions readOptions, final ByteBuffer key) { + return keyMayExist(null, readOptions, key); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns {@link KeyMayExist.KeyMayExistEnum#kNotExist}, + * otherwise if it can with best effort retrieve the value, it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithValue}, otherwise it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithoutValue}. The choice not to return a value which might + * exist is at the discretion of the implementation; the only guarantee is that {@link + * KeyMayExist.KeyMayExistEnum#kNotExist} is an assurance that the key does not exist.
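+ * <p>Editor's illustrative sketch (not upstream documentation); {@code db}
+ * is assumed and both buffers must be direct:
+ * <pre>{@code
+ * final ByteBuffer keyBuf = ByteBuffer.allocateDirect(16);
+ * keyBuf.put("user:42".getBytes(StandardCharsets.UTF_8));
+ * keyBuf.flip();
+ * final ByteBuffer valBuf = ByteBuffer.allocateDirect(256);
+ * final KeyMayExist kme = db.keyMayExist(keyBuf, valBuf);
+ * if (kme.exists == KeyMayExist.KeyMayExistEnum.kExistsWithValue) {
+ *   // valBuf holds the first kme.valueLength bytes of the value
+ * }
+ * }</pre>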
+ * + * @param key ByteBuffer containing the key + * @param value ByteBuffer which will receive a value if the key exists and a value is known + * @return a {@link KeyMayExist} object reporting if the key may exist and if a value is provided + */ + public KeyMayExist keyMayExist(final ByteBuffer key, final ByteBuffer value) { + return keyMayExist(null, null, key, value); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns {@link KeyMayExist.KeyMayExistEnum#kNotExist}, + * otherwise if it can with best effort retrieve the value, it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithValue}, otherwise it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithoutValue}. The choice not to return a value which might + * exist is at the discretion of the implementation; the only guarantee is that {@link + * KeyMayExist.KeyMayExistEnum#kNotExist} is an assurance that the key does not exist. + * + * @param columnFamilyHandle the {@link ColumnFamilyHandle} to look for the key in + * @param key ByteBuffer containing the key + * @param value ByteBuffer which will receive a value if the key exists and a value is known + * @return a {@link KeyMayExist} object reporting if the key may exist and if a value is provided + */ + public KeyMayExist keyMayExist( + final ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, final ByteBuffer value) { + return keyMayExist(columnFamilyHandle, null, key, value); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns {@link KeyMayExist.KeyMayExistEnum#kNotExist}, + * otherwise if it can with best effort retrieve the value, it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithValue}, otherwise it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithoutValue}. The choice not to return a value which might + * exist is at the discretion of the implementation; the only guarantee is that {@link + * KeyMayExist.KeyMayExistEnum#kNotExist} is an assurance that the key does not exist. + * + * @param readOptions the {@link ReadOptions} to use when reading the key/value + * @param key ByteBuffer containing the key + * @param value ByteBuffer which will receive a value if the key exists and a value is known + * @return a {@link KeyMayExist} object reporting if the key may exist and if a value is provided + */ + public KeyMayExist keyMayExist( + final ReadOptions readOptions, final ByteBuffer key, final ByteBuffer value) { + return keyMayExist(null, readOptions, key, value); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, otherwise it returns true if the key might exist. + * That is to say that this method is probabilistic and may return false + * positives, but never a false negative. + * + * @param columnFamilyHandle the {@link ColumnFamilyHandle} to look for the key in + * @param readOptions the {@link ReadOptions} to use when reading the key/value + * @param key ByteBuffer containing the key + * @return false if the key definitely does not exist in the database, + * otherwise true. + */ + public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions, final ByteBuffer key) { + assert key != null : "key ByteBuffer parameter cannot be null"; + assert key.isDirect() : "key parameter must be a direct ByteBuffer"; + return keyMayExistDirect(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + readOptions == null ? 0 : readOptions.nativeHandle_, key, key.position(), key.limit()); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns {@link KeyMayExist.KeyMayExistEnum#kNotExist}, + * otherwise if it can with best effort retrieve the value, it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithValue}, otherwise it returns {@link + * KeyMayExist.KeyMayExistEnum#kExistsWithoutValue}. The choice not to return a value which might + * exist is at the discretion of the implementation; the only guarantee is that {@link + * KeyMayExist.KeyMayExistEnum#kNotExist} is an assurance that the key does not exist. + * + * @param columnFamilyHandle the {@link ColumnFamilyHandle} to look for the key in + * @param readOptions the {@link ReadOptions} to use when reading the key/value + * @param key ByteBuffer containing the key + * @param value ByteBuffer which will receive a value if the key exists and a value is known + * @return a {@link KeyMayExist} object reporting if the key may exist and if a value is provided + */ + public KeyMayExist keyMayExist(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions, final ByteBuffer key, final ByteBuffer value) { + assert key != null : "key ByteBuffer parameter cannot be null"; + assert key.isDirect() : "key parameter must be a direct ByteBuffer"; + assert value != null + : "value ByteBuffer parameter cannot be null. If you do not need the value, use a different version of the method"; + assert value.isDirect() : "value parameter must be a direct ByteBuffer"; + + final int[] result = keyMayExistDirectFoundValue(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + readOptions == null ? 0 : readOptions.nativeHandle_, key, key.position(), key.remaining(), + value, value.position(), value.remaining()); + final int valueLength = result[1]; + value.limit(value.position() + Math.min(valueLength, value.remaining())); + return new KeyMayExist(KeyMayExist.KeyMayExistEnum.values()[result[0]], valueLength); + } + + /**
+ * <p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p> + * + * <p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p> + * + * @return instance of iterator object. + */ + public RocksIterator newIterator() { + return new RocksIterator(this, iterator(nativeHandle_)); + } + + /**
+ * <p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p> + * + * <p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p>
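+ * <p>Editor's illustrative sketch (not upstream documentation); {@code db}
+ * is assumed:
+ * <pre>{@code
+ * try (final ReadOptions readOpts = new ReadOptions();
+ *      final RocksIterator it = db.newIterator(readOpts)) {
+ *   for (it.seekToFirst(); it.isValid(); it.next()) {
+ *     final byte[] k = it.key();
+ *     final byte[] v = it.value();
+ *   }
+ *   it.status(); // surfaces any error hit during iteration
+ * }
+ * }</pre>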
+ * + * @param readOptions {@link ReadOptions} instance. + * @return instance of iterator object. + */ + public RocksIterator newIterator(final ReadOptions readOptions) { + return new RocksIterator(this, iterator(nativeHandle_, + readOptions.nativeHandle_)); + } + + /**
+ * <p>Return a heap-allocated iterator over the contents of a + * ColumnFamily. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p> + * + * <p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p> + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @return instance of iterator object. + */ + public RocksIterator newIterator( + final ColumnFamilyHandle columnFamilyHandle) { + return new RocksIterator(this, iteratorCF(nativeHandle_, + columnFamilyHandle.nativeHandle_)); + } + + /** + * <p>Return a heap-allocated iterator over the contents of a + * ColumnFamily. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p> + * + * <p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p> + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param readOptions {@link ReadOptions} instance. + * @return instance of iterator object. + */ + public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions) { + return new RocksIterator(this, iteratorCF(nativeHandle_, + columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_)); + } + + /** + * Returns iterators from a consistent database state across multiple + * column families. Iterators are heap allocated and need to be deleted + * before the db is deleted. + * + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} + * instances + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<RocksIterator> newIterators( + final List<ColumnFamilyHandle> columnFamilyHandleList) + throws RocksDBException { + return newIterators(columnFamilyHandleList, new ReadOptions()); + } + + /** + * Returns iterators from a consistent database state across multiple + * column families. Iterators are heap allocated and need to be deleted + * before the db is deleted. + * + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param readOptions {@link ReadOptions} instance. + * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} + * instances + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<RocksIterator> newIterators( + final List<ColumnFamilyHandle> columnFamilyHandleList, + final ReadOptions readOptions) throws RocksDBException { + + final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + + final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles, + readOptions.nativeHandle_); + + final List<RocksIterator> iterators = new ArrayList<>( + columnFamilyHandleList.size()); + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + iterators.add(new RocksIterator(this, iteratorRefs[i])); + } + return iterators; + } + + /** + * <p>Return a handle to the current DB state. Iterators created with + * this handle will all observe a stable snapshot of the current DB + * state. The caller must call {@link #releaseSnapshot(Snapshot)} when the + * snapshot is no longer needed.</p>
+ * + * <p>Null will be returned if the DB fails to take a snapshot or does + * not support snapshot.</p>
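+ * <p>Editor's illustrative sketch (not upstream documentation); {@code db}
+ * is assumed:
+ * <pre>{@code
+ * final Snapshot snapshot = db.getSnapshot();
+ * try (final ReadOptions readOpts = new ReadOptions().setSnapshot(snapshot)) {
+ *   final byte[] v = db.get(readOpts, "k".getBytes(StandardCharsets.UTF_8));
+ *   // v reflects the state at the time getSnapshot() was called
+ * } finally {
+ *   db.releaseSnapshot(snapshot);
+ * }
+ * }</pre>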
+ * + * @return Snapshot {@link Snapshot} instance + */ + public Snapshot getSnapshot() { + long snapshotHandle = getSnapshot(nativeHandle_); + if (snapshotHandle != 0) { + return new Snapshot(snapshotHandle); + } + return null; + } + + /** + * Release a previously acquired snapshot. + * + * The caller must not use "snapshot" after this call. + * + * @param snapshot {@link Snapshot} instance + */ + public void releaseSnapshot(final Snapshot snapshot) { + if (snapshot != null) { + releaseSnapshot(nativeHandle_, snapshot.nativeHandle_); + } + } + + /** + * DB implementations can export properties about their state + * via this method on a per column family level. + * + * <p>If {@code property} is a valid property understood by this DB + * implementation, fills {@code value} with its current value and + * returns true. Otherwise returns false.</p> + * + * <p>Valid property names include: + * <ul> + * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at + * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level + * number (e.g. "0").</li> + * <li>"rocksdb.stats" - returns a multi-line string that describes statistics + * about the internal operation of the DB.</li> + * <li>"rocksdb.sstables" - returns a multi-line string that describes all + * of the sstables that make up the db contents.</li> + * </ul>
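+ * <p>Editor's illustrative sketch (not upstream documentation); {@code db}
+ * and {@code cfh} are assumed:
+ * <pre>{@code
+ * final String stats = db.getProperty(cfh, "rocksdb.stats");
+ * }</pre>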
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * @param property to be fetched. See above for examples + * @return property value + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public String getProperty( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final String property) throws RocksDBException { + return getProperty(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + property, property.length()); + } + + /** + * DB implementations can export properties about their state + * via this method. If {@code property} is a valid property understood by + * this DB implementation, its current value is returned; otherwise an + * error results. + * + * <p>Valid property names include: + * <ul> + * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at + * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level + * number (e.g. "0").</li> + * <li>"rocksdb.stats" - returns a multi-line string that describes statistics + * about the internal operation of the DB.</li> + * <li>"rocksdb.sstables" - returns a multi-line string that describes all + * of the sstables that make up the db contents.</li> + * </ul>
+ * + * @param property to be fetched. See above for examples + * @return property value + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public String getProperty(final String property) throws RocksDBException { + return getProperty(null, property); + } + + + /** + * Gets a property map. + * + * @param property to be fetched. + * + * @return the property map + * + * @throws RocksDBException if an error happens in the underlying native code. + */ + public Map getMapProperty(final String property) + throws RocksDBException { + return getMapProperty(null, property); + } + + /** + * Gets a property map. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * @param property to be fetched. + * + * @return the property map + * + * @throws RocksDBException if an error happens in the underlying native code. + */ + public Map getMapProperty( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final String property) throws RocksDBException { + return getMapProperty(nativeHandle_, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_, + property, property.length()); + } + + /** + *
+ * <p>Similar to GetProperty(), but only works for a subset of properties
+ * whose return value is a numerical value. Return the value as long.</p>
+ * + *
+ * <p>Note: As the returned property is of type
+ * {@code uint64_t} on the C++ side, the returned value can be negative
+ * because Java 7 supports only signed long values.</p>
+ * + *
+ * <p>Java 7: To mitigate the problem of the
+ * non-existent unsigned long type, values should be encapsulated using
+ * {@link java.math.BigInteger} to reflect the correct value. The correct
+ * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ * + *
+ * <p>Java 8: In Java 8 the value should be treated as
+ * unsigned long using provided methods of type {@link Long}.</p>
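+ *
+ * <p>A sketch of both approaches described above (assuming an open
+ * {@code RocksDB db}; "rocksdb.estimate-num-keys" is one of the numerical
+ * properties):
+ * <pre>{@code
+ *   long raw = db.getLongProperty("rocksdb.estimate-num-keys");
+ *   // Java 8+: interpret the uint64_t bits without a sign.
+ *   String unsigned = Long.toUnsignedString(raw);
+ *   // Java 7: recover the exact value via BigInteger as described above.
+ *   java.math.BigInteger exact = java.math.BigInteger.valueOf(raw);
+ *   if (exact.signum() < 0) {
+ *     exact = exact.add(java.math.BigInteger.ONE.shiftLeft(64));
+ *   }
+ * }</pre>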
+ * + * @param property to be fetched. + * + * @return numerical property value. + * + * @throws RocksDBException if an error happens in the underlying native code. + */ + public long getLongProperty(final String property) throws RocksDBException { + return getLongProperty(null, property); + } + + /** + *
+ * <p>Similar to GetProperty(), but only works for a subset of properties
+ * whose return value is a numerical value. Return the value as long.</p>
+ * + *
+ * <p>Note: As the returned property is of type
+ * {@code uint64_t} on the C++ side, the returned value can be negative
+ * because Java 7 supports only signed long values.</p>
+ * + *
+ * <p>Java 7: To mitigate the problem of the
+ * non-existent unsigned long type, values should be encapsulated using
+ * {@link java.math.BigInteger} to reflect the correct value. The correct
+ * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ * + *
+ * <p>Java 8: In Java 8 the value should be treated as
+ * unsigned long using provided methods of type {@link Long}.</p>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family
+ * @param property to be fetched.
+ *
+ * @return numerical property value
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public long getLongProperty(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final String property) throws RocksDBException {
+ return getLongProperty(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ property, property.length());
+ }
+
+ /**
+ * Reset internal stats for DB and all column families.
+ *
+ * Note this doesn't reset {@link Options#statistics()} as it is not
+ * owned by DB.
+ *
+ * @throws RocksDBException if an error occurs whilst resetting the stats
+ */
+ public void resetStats() throws RocksDBException {
+ resetStats(nativeHandle_);
+ }
+
+ /**
+ * <p>Return the sum of {@code getLongProperty} over all the column
+ * families.</p>
+ * + *
+ * <p>Note: As the returned property is of type
+ * {@code uint64_t} on the C++ side, the returned value can be negative
+ * because Java 7 supports only signed long values.</p>
+ * + *
+ * <p>Java 7: To mitigate the problem of the
+ * non-existent unsigned long type, values should be encapsulated using
+ * {@link java.math.BigInteger} to reflect the correct value. The correct
+ * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ * + *
+ * <p>Java 8: In Java 8 the value should be treated as
+ * unsigned long using provided methods of type {@link Long}.</p>
+ *
+ * @param property to be fetched.
+ *
+ * @return numerical property value
+ *
+ * @throws RocksDBException if an error happens in the underlying native code.
+ */
+ public long getAggregatedLongProperty(final String property)
+ throws RocksDBException {
+ return getAggregatedLongProperty(nativeHandle_, property,
+ property.length());
+ }
+
+ /**
+ * Get the approximate file system space used by keys in each range.
+ *
+ * Note that the returned sizes measure file system space usage, so
+ * if the user data compresses by a factor of ten, the returned
+ * sizes will be one-tenth the size of the corresponding user data size.
+ *
+ * {@code sizeApproximationFlags} defines whether the returned size
+ * should include the recently written data in the mem-tables (if
+ * the mem-table type supports it), data serialized to disk, or both.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family
+ * @param ranges the ranges over which to approximate sizes
+ * @param sizeApproximationFlags flags to determine what to include in the
+ * approximation.
+ *
+ * @return the sizes
+ */
+ public long[] getApproximateSizes(
+ /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle,
+ final List<Range> ranges,
+ final SizeApproximationFlag... sizeApproximationFlags) {
+
+ byte flags = 0x0;
+ for (final SizeApproximationFlag sizeApproximationFlag
+ : sizeApproximationFlags) {
+ flags |= sizeApproximationFlag.getValue();
+ }
+
+ return getApproximateSizes(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ toRangeSliceHandles(ranges), flags);
+ }
+
+ /**
+ * Get the approximate file system space used by keys in each range for
+ * the default column family.
+ *
+ * Note that the returned sizes measure file system space usage, so
+ * if the user data compresses by a factor of ten, the returned
+ * sizes will be one-tenth the size of the corresponding user data size.
+ *
+ * {@code sizeApproximationFlags} defines whether the returned size
+ * should include the recently written data in the mem-tables (if
+ * the mem-table type supports it), data serialized to disk, or both.
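+ *
+ * <p>A minimal usage sketch (assuming an open {@code RocksDB db}; the
+ * key bounds of the hypothetical {@code Range} are arbitrary):
+ * <pre>{@code
+ *   long[] sizes = db.getApproximateSizes(
+ *       Arrays.asList(new Range(new Slice("a"), new Slice("z"))),
+ *       SizeApproximationFlag.INCLUDE_FILES,
+ *       SizeApproximationFlag.INCLUDE_MEMTABLES);
+ * }</pre>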
+ *
+ * @param ranges the ranges over which to approximate sizes
+ * @param sizeApproximationFlags flags to determine what to include in the
+ * approximation.
+ *
+ * @return the sizes.
+ */
+ public long[] getApproximateSizes(final List<Range> ranges,
+ final SizeApproximationFlag... sizeApproximationFlags) {
+ return getApproximateSizes(null, ranges, sizeApproximationFlags);
+ }
+
+ public static class CountAndSize {
+ public final long count;
+ public final long size;
+
+ public CountAndSize(final long count, final long size) {
+ this.count = count;
+ this.size = size;
+ }
+ }
+
+ /**
+ * This method is similar to
+ * {@link #getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)},
+ * except that it returns approximate number of records and size in memtables.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family
+ * @param range the range over which to get the memtable stats
+ *
+ * @return the count and size for the range
+ */
+ public CountAndSize getApproximateMemTableStats(
+ /*@Nullable*/ final ColumnFamilyHandle columnFamilyHandle,
+ final Range range) {
+ final long[] result = getApproximateMemTableStats(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ range.start.getNativeHandle(),
+ range.limit.getNativeHandle());
+ return new CountAndSize(result[0], result[1]);
+ }
+
+ /**
+ * This method is similar to
+ * {@link #getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)},
+ * except that it returns approximate number of records and size in memtables.
+ *
+ * @param range the range over which to get the memtable stats
+ *
+ * @return the count and size for the range
+ */
+ public CountAndSize getApproximateMemTableStats(
+ final Range range) {
+ return getApproximateMemTableStats(null, range);
+ }
+
+ /**
+ * <p>Range compaction of database.</p>
+ * <p>Note: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>See also</p>
+ * <ul>
+ * <li>{@link #compactRange(byte[], byte[])}</li>
+ * </ul>
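+ *
+ * <p>A minimal usage sketch (assuming an open {@code RocksDB db}):
+ * <pre>{@code
+ *   // Compact the full key range of the default column family.
+ *   db.compactRange();
+ * }</pre>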
+ * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange() throws RocksDBException { + compactRange(null); + } + + /** + *
+ * <p>Range compaction of column family.</p>
+ * <p>Note: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>See also</p>
+ * <ul>
+ * <li>{@link #compactRange(ColumnFamilyHandle, byte[], byte[])}</li>
+ * </ul>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + compactRange(nativeHandle_, null, -1, null, -1, 0, + columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_); + } + + /** + *
+ * <p>Range compaction of database.</p>
+ * <p>Note: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>See also</p>
+ * <ul>
+ * <li>{@link #compactRange()}</li>
+ * </ul>
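+ *
+ * <p>For example (hypothetical key bounds, assuming an open
+ * {@code RocksDB db}):
+ * <pre>{@code
+ *   byte[] begin = "key-a".getBytes(java.nio.charset.StandardCharsets.UTF_8);
+ *   byte[] end = "key-z".getBytes(java.nio.charset.StandardCharsets.UTF_8);
+ *   db.compactRange(begin, end);
+ * }</pre>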
+ * + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final byte[] begin, final byte[] end) + throws RocksDBException { + compactRange(null, begin, end); + } + + /** + *
+ * <p>Range compaction of column family.</p>
+ * <p>Note: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * <p>See also</p>
+ * <ul>
+ * <li>{@link #compactRange(ColumnFamilyHandle)}</li>
+ * </ul>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance, or null for the default column family. + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange( + /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle, + final byte[] begin, final byte[] end) throws RocksDBException { + compactRange(nativeHandle_, + begin, begin == null ? -1 : begin.length, + end, end == null ? -1 : end.length, + 0, columnFamilyHandle == null ? 0: columnFamilyHandle.nativeHandle_); + } + + /** + *
+ * <p>Range compaction of column family.</p>
+ * <p>Note: After the database has been compacted,
+ * all data will have been pushed down to the last level containing
+ * any data.</p>
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
+ * @param begin start of key range (included in range)
+ * @param end end of key range (excluded from range)
+ * @param compactRangeOptions options for the compaction
+ *
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void compactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] begin, final byte[] end,
+ final CompactRangeOptions compactRangeOptions) throws RocksDBException {
+ compactRange(nativeHandle_,
+ begin, begin == null ? -1 : begin.length,
+ end, end == null ? -1 : end.length,
+ compactRangeOptions.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Change the options for the column family handle.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ * @param mutableColumnFamilyOptions the options.
+ *
+ * @throws RocksDBException if an error occurs whilst setting the options
+ */
+ public void setOptions(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final MutableColumnFamilyOptions mutableColumnFamilyOptions)
+ throws RocksDBException {
+ setOptions(nativeHandle_, columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ mutableColumnFamilyOptions.getKeys(), mutableColumnFamilyOptions.getValues());
+ }
+
+ /**
+ * Get the options for the column family handle.
+ *
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle}
+ * instance, or null for the default column family.
+ *
+ * @return the options parsed from the options string returned by RocksDB
+ *
+ * @throws RocksDBException if an error occurs while getting the options string, or parsing the
+ * resulting options string into options
+ */
+ public MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder getOptions(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException {
+ String optionsString = getOptions(
+ nativeHandle_, columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ return MutableColumnFamilyOptions.parse(optionsString, true);
+ }
+
+ /**
+ * Get the options for the default column family.
+ *
+ * @return the options parsed from the options string returned by RocksDB
+ *
+ * @throws RocksDBException if an error occurs while getting the options string, or parsing the
+ * resulting options string into options
+ */
+ public MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder getOptions()
+ throws RocksDBException {
+ return getOptions(null);
+ }
+
+ /**
+ * Get the database options.
+ *
+ * @return the DB options parsed from the options string returned by RocksDB
+ *
+ * @throws RocksDBException if an error occurs while getting the options string, or parsing the
+ * resulting options string into options
+ */
+ public MutableDBOptions.MutableDBOptionsBuilder getDBOptions() throws RocksDBException {
+ String optionsString = getDBOptions(nativeHandle_);
+ return MutableDBOptions.parse(optionsString, true);
+ }
+
+ /**
+ * Change the options for the default column family handle.
+ *
+ * @param mutableColumnFamilyOptions the options.
+ *
+ * @throws RocksDBException if an error occurs whilst setting the options
+ */
+ public void setOptions(
+ final MutableColumnFamilyOptions mutableColumnFamilyOptions)
+ throws RocksDBException {
+ setOptions(null, mutableColumnFamilyOptions);
+ }
+
+ /**
+ * Set the database options.
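+ *
+ * <p>A minimal sketch (assuming an open {@code RocksDB db}; the option
+ * shown is just one of the mutable DB options):
+ * <pre>{@code
+ *   db.setDBOptions(MutableDBOptions.builder()
+ *       .setMaxBackgroundJobs(4)
+ *       .build());
+ * }</pre>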
+ *
+ * @param mutableDBoptions the options.
+ *
+ * @throws RocksDBException if an error occurs whilst setting the options
+ */
+ public void setDBOptions(final MutableDBOptions mutableDBoptions)
+ throws RocksDBException {
+ setDBOptions(nativeHandle_,
+ mutableDBoptions.getKeys(),
+ mutableDBoptions.getValues());
+ }
+
+ /**
+ * Takes a list of files specified by file names and
+ * compacts them to the specified level.
+ *
+ * Note that the behavior is different from
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * in that CompactFiles() performs the compaction job using the CURRENT
+ * thread.
+ *
+ * @param compactionOptions compaction options
+ * @param inputFileNames the names of the files to compact
+ * @param outputLevel the level to which they should be compacted
+ * @param outputPathId the id of the output path, or -1
+ * @param compactionJobInfo the compaction job info, this parameter
+ * will be updated with the info from compacting the files,
+ * can just be null if you don't need it.
+ *
+ * @return the list of compacted files
+ *
+ * @throws RocksDBException if an error occurs during compaction
+ */
+ public List<String> compactFiles(
+ final CompactionOptions compactionOptions,
+ final List<String> inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+ throws RocksDBException {
+ return compactFiles(compactionOptions, null, inputFileNames, outputLevel,
+ outputPathId, compactionJobInfo);
+ }
+
+ /**
+ * Takes a list of files specified by file names and
+ * compacts them to the specified level.
+ *
+ * Note that the behavior is different from
+ * {@link #compactRange(ColumnFamilyHandle, byte[], byte[])}
+ * in that CompactFiles() performs the compaction job using the CURRENT
+ * thread.
+ *
+ * @param compactionOptions compaction options
+ * @param columnFamilyHandle columnFamilyHandle, or null for the
+ * default column family
+ * @param inputFileNames the names of the files to compact
+ * @param outputLevel the level to which they should be compacted
+ * @param outputPathId the id of the output path, or -1
+ * @param compactionJobInfo the compaction job info, this parameter
+ * will be updated with the info from compacting the files,
+ * can just be null if you don't need it.
+ *
+ * @return the list of compacted files
+ *
+ * @throws RocksDBException if an error occurs during compaction
+ */
+ public List<String> compactFiles(
+ final CompactionOptions compactionOptions,
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final List<String> inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ /* @Nullable */ final CompactionJobInfo compactionJobInfo)
+ throws RocksDBException {
+ return Arrays.asList(compactFiles(nativeHandle_, compactionOptions.nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ inputFileNames.toArray(new String[0]),
+ outputLevel,
+ outputPathId,
+ compactionJobInfo == null ? 0 : compactionJobInfo.nativeHandle_));
+ }
+
+ /**
+ * This function will cancel all currently running background processes.
+ *
+ * @param wait if true, wait for all background work to be cancelled before
+ * returning.
+ */
+ public void cancelAllBackgroundWork(boolean wait) {
+ cancelAllBackgroundWork(nativeHandle_, wait);
+ }
+
+ /**
+ * This function will wait until all currently running background processes
+ * finish. After it returns, no background process will be run until
+ * {@link #continueBackgroundWork()} is called.
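+ *
+ * <p>A sketch of a pause/resume pair (assuming an open {@code RocksDB db}):
+ * <pre>{@code
+ *   db.pauseBackgroundWork();
+ *   try {
+ *     // ... inspect the database while flushes/compactions are paused ...
+ *   } finally {
+ *     db.continueBackgroundWork();
+ *   }
+ * }</pre>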
+ *
+ * @throws RocksDBException if an error occurs when pausing background work
+ */
+ public void pauseBackgroundWork() throws RocksDBException {
+ pauseBackgroundWork(nativeHandle_);
+ }
+
+ /**
+ * Resumes background work which was suspended by
+ * previously calling {@link #pauseBackgroundWork()}.
+ *
+ * @throws RocksDBException if an error occurs when resuming background work
+ */
+ public void continueBackgroundWork() throws RocksDBException {
+ continueBackgroundWork(nativeHandle_);
+ }
+
+ /**
+ * Enable automatic compactions for the given column
+ * families if they were previously disabled.
+ *
+ * The function will first set the
+ * {@link ColumnFamilyOptions#disableAutoCompactions()} option for each
+ * column family to false, after which it will schedule a flush/compaction.
+ *
+ * NOTE: Setting disableAutoCompactions to 'false' through
+ * {@link #setOptions(ColumnFamilyHandle, MutableColumnFamilyOptions)}
+ * does NOT schedule a flush/compaction afterwards, and only changes the
+ * parameter itself within the column family option.
+ *
+ * @param columnFamilyHandles the column family handles
+ *
+ * @throws RocksDBException if an error occurs whilst enabling auto-compaction
+ */
+ public void enableAutoCompaction(
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ enableAutoCompaction(nativeHandle_,
+ toNativeHandleList(columnFamilyHandles));
+ }
+
+ /**
+ * Number of levels used for this DB.
+ *
+ * @return the number of levels
+ */
+ public int numberLevels() {
+ return numberLevels(null);
+ }
+
+ /**
+ * Number of levels used for a column family in this DB.
+ *
+ * @param columnFamilyHandle the column family handle, or null
+ * for the default column family
+ *
+ * @return the number of levels
+ */
+ public int numberLevels(/* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+ return numberLevels(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Maximum level to which a new compacted memtable is pushed if it
+ * does not create overlap.
+ *
+ * @return the maximum level
+ */
+ public int maxMemCompactionLevel() {
+ return maxMemCompactionLevel(null);
+ }
+
+ /**
+ * Maximum level to which a new compacted memtable is pushed if it
+ * does not create overlap.
+ *
+ * @param columnFamilyHandle the column family handle
+ *
+ * @return the maximum level
+ */
+ public int maxMemCompactionLevel(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+ return maxMemCompactionLevel(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Number of files in level-0 that would stop writes.
+ *
+ * @return the number of files
+ */
+ public int level0StopWriteTrigger() {
+ return level0StopWriteTrigger(null);
+ }
+
+ /**
+ * Number of files in level-0 that would stop writes.
+ *
+ * @param columnFamilyHandle the column family handle
+ *
+ * @return the number of files
+ */
+ public int level0StopWriteTrigger(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+ return level0StopWriteTrigger(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get DB name -- the exact same name that was provided as the path
+ * argument to {@link #open(Options, String)}.
+ * + * @return the DB name + */ + public String getName() { + return getName(nativeHandle_); + } + + /** + * Get the Env object from the DB + * + * @return the env + */ + public Env getEnv() { + final long envHandle = getEnv(nativeHandle_); + if (envHandle == Env.getDefault().nativeHandle_) { + return Env.getDefault(); + } else { + final Env env = new RocksEnv(envHandle); + env.disOwnNativeHandle(); // we do not own the Env! + return env; + } + } + + /** + *
+ * <p>Flush all memory table data.</p>
+ *
+ * <p>Note: it must be ensured that the FlushOptions instance
+ * is not GC'ed before this method finishes. If the wait parameter is
+ * set to false, flush processing is asynchronous.</p>
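+ *
+ * <p>A minimal usage sketch (assuming an open {@code RocksDB db}):
+ * <pre>{@code
+ *   try (final FlushOptions flushOptions = new FlushOptions()
+ *       .setWaitForFlush(true)) {
+ *     db.flush(flushOptions);
+ *   }
+ * }</pre>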
+ *
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void flush(final FlushOptions flushOptions)
+ throws RocksDBException {
+ flush(flushOptions, (List<ColumnFamilyHandle>) null);
+ }
+
+ /**
+ * <p>Flush all memory table data.</p>
+ *
+ * <p>Note: it must be ensured that the FlushOptions instance
+ * is not GC'ed before this method finishes. If the wait parameter is
+ * set to false, flush processing is asynchronous.</p>
+ *
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void flush(final FlushOptions flushOptions,
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ flush(flushOptions,
+ columnFamilyHandle == null ? null : Arrays.asList(columnFamilyHandle));
+ }
+
+ /**
+ * Flushes multiple column families.
+ *
+ * If atomic flush is not enabled, this is equivalent to calling
+ * {@link #flush(FlushOptions, ColumnFamilyHandle)} multiple times.
+ *
+ * If atomic flush is enabled, this will flush all column families
+ * specified up to the latest sequence number at the time when flush is
+ * requested.
+ *
+ * @param flushOptions {@link org.rocksdb.FlushOptions} instance.
+ * @param columnFamilyHandles column family handles.
+ * @throws RocksDBException thrown if an error occurs within the native
+ * part of the library.
+ */
+ public void flush(final FlushOptions flushOptions,
+ /* @Nullable */ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+ flush(nativeHandle_, flushOptions.nativeHandle_,
+ toNativeHandleList(columnFamilyHandles));
+ }
+
+ /**
+ * Flush the WAL memory buffer to the file. If {@code sync} is true,
+ * it calls {@link #syncWal()} afterwards.
+ *
+ * @param sync true to also fsync to disk.
+ *
+ * @throws RocksDBException if an error occurs whilst flushing
+ */
+ public void flushWal(final boolean sync) throws RocksDBException {
+ flushWal(nativeHandle_, sync);
+ }
+
+ /**
+ * Sync the WAL.
+ *
+ * Note that {@link #write(WriteOptions, WriteBatch)} followed by
+ * {@link #syncWal()} is not exactly the same as
+ * {@link #write(WriteOptions, WriteBatch)} with
+ * {@link WriteOptions#sync()} set to true; in the latter case the changes
+ * won't be visible until the sync is done.
+ *
+ * Currently only works if {@link Options#allowMmapWrites()} is set to false.
+ *
+ * @throws RocksDBException if an error occurs whilst syncing
+ */
+ public void syncWal() throws RocksDBException {
+ syncWal(nativeHandle_);
+ }
+
+ /**
+ * <p>The sequence number of the most recent transaction.</p>
+ * + * @return sequence number of the most + * recent transaction. + */ + public long getLatestSequenceNumber() { + return getLatestSequenceNumber(nativeHandle_); + } + + /** + *
+ * <p>Prevent file deletions. Compactions will continue to occur,
+ * but no obsolete files will be deleted. Calling this multiple
+ * times has the same effect as calling it once.</p>
+ * + * @throws RocksDBException thrown if operation was not performed + * successfully. + */ + public void disableFileDeletions() throws RocksDBException { + disableFileDeletions(nativeHandle_); + } + + /** + *
+ * <p>Allow compactions to delete obsolete files.
+ * If force == true, the call to EnableFileDeletions()
+ * will guarantee that file deletions are enabled after
+ * the call, even if DisableFileDeletions() was called
+ * multiple times before.</p>
+ *
+ * <p>If force == false, EnableFileDeletions will only
+ * enable file deletion after it's been called at least
+ * as many times as DisableFileDeletions(), enabling
+ * the two methods to be called by two threads
+ * concurrently without synchronization
+ * -- i.e., file deletions will be enabled only after both
+ * threads call EnableFileDeletions()</p>
+ *
+ * @param force boolean value described above.
+ *
+ * @throws RocksDBException thrown if operation was not performed
+ * successfully.
+ */
+ public void enableFileDeletions(final boolean force)
+ throws RocksDBException {
+ enableFileDeletions(nativeHandle_, force);
+ }
+
+ public static class LiveFiles {
+ /**
+ * The valid size of the manifest file. The manifest file is an ever-growing
+ * file, but only the portion specified here is valid for this snapshot.
+ */
+ public final long manifestFileSize;
+
+ /**
+ * The files are relative to the {@link #getName()} and are not
+ * absolute paths. Despite being relative paths, the file names begin
+ * with "/".
+ */
+ public final List<String> files;
+
+ LiveFiles(final long manifestFileSize, final List<String> files) {
+ this.manifestFileSize = manifestFileSize;
+ this.files = files;
+ }
+ }
+
+ /**
+ * Retrieve the list of all files in the database after flushing the memtable.
+ *
+ * See {@link #getLiveFiles(boolean)}.
+ *
+ * @return the live files
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the list
+ * of live files
+ */
+ public LiveFiles getLiveFiles() throws RocksDBException {
+ return getLiveFiles(true);
+ }
+
+ /**
+ * Retrieve the list of all files in the database.
+ *
+ * In case you have multiple column families, even if {@code flushMemtable}
+ * is true, you still need to call {@link #getSortedWalFiles()}
+ * after {@link #getLiveFiles(boolean)} to compensate for new data that
+ * arrived to already-flushed column families while other column families
+ * were flushing.
+ *
+ * NOTE: Calling {@link #getLiveFiles(boolean)} followed by
+ * {@link #getSortedWalFiles()} can generate a lossless backup.
+ *
+ * @param flushMemtable set to true to flush before recording the live
+ * files. Setting to false is useful when we don't want to wait for flush
+ * which may have to wait for compaction to complete taking an
+ * indeterminate time.
+ *
+ * @return the live files
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the list
+ * of live files
+ */
+ public LiveFiles getLiveFiles(final boolean flushMemtable)
+ throws RocksDBException {
+ final String[] result = getLiveFiles(nativeHandle_, flushMemtable);
+ if (result == null) {
+ return null;
+ }
+ final String[] files = Arrays.copyOf(result, result.length - 1);
+ final long manifestFileSize = Long.parseLong(result[result.length - 1]);
+
+ return new LiveFiles(manifestFileSize, Arrays.asList(files));
+ }
+
+ /**
+ * Retrieve the sorted list of all wal files with earliest file first.
+ *
+ * @return the log files
+ *
+ * @throws RocksDBException if an error occurs whilst retrieving the list
+ * of sorted WAL files
+ */
+ public List<LogFile> getSortedWalFiles() throws RocksDBException {
+ final LogFile[] logFiles = getSortedWalFiles(nativeHandle_);
+ return Arrays.asList(logFiles);
+ }
+
+ /**
+ * <p>Returns an iterator that is positioned at a write-batch containing
+ * seq_number. If the sequence number is non-existent, it returns an iterator
+ * at the first available seq_no after the requested seq_no.</p>
+ * + *
+ * <p>Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to
+ * use this API, else the WAL files will get
+ * cleared aggressively and the iterator might become invalid before
+ * an update is read.</p>
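+ *
+ * <p>A sketch of tailing the update log (assuming an open {@code RocksDB db}
+ * with WAL retention configured as described above):
+ * <pre>{@code
+ *   try (final TransactionLogIterator it = db.getUpdatesSince(0)) {
+ *     while (it.isValid()) {
+ *       final TransactionLogIterator.BatchResult batch = it.getBatch();
+ *       // ... apply or inspect batch.writeBatch() ...
+ *       it.next();
+ *     }
+ *   }
+ * }</pre>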
+ *
+ * @param sequenceNumber sequence number offset
+ *
+ * @return {@link org.rocksdb.TransactionLogIterator} instance.
+ *
+ * @throws org.rocksdb.RocksDBException if iterator cannot be retrieved
+ * from native-side.
+ */
+ public TransactionLogIterator getUpdatesSince(final long sequenceNumber)
+ throws RocksDBException {
+ return new TransactionLogIterator(
+ getUpdatesSince(nativeHandle_, sequenceNumber));
+ }
+
+ /**
+ * Delete the file name from the db directory and update the internal state to
+ * reflect that. Supports deletion of sst and log files only. 'name' must be
+ * path relative to the db directory. eg. 000001.sst, /archive/000003.log
+ *
+ * @param name the file name
+ *
+ * @throws RocksDBException if an error occurs whilst deleting the file
+ */
+ public void deleteFile(final String name) throws RocksDBException {
+ deleteFile(nativeHandle_, name);
+ }
+
+ /**
+ * Gets a list of all table files metadata.
+ *
+ * @return table files metadata.
+ */
+ public List<LiveFileMetaData> getLiveFilesMetaData() {
+ return Arrays.asList(getLiveFilesMetaData(nativeHandle_));
+ }
+
+ /**
+ * Obtains the meta data of the specified column family of the DB.
+ *
+ * @param columnFamilyHandle the column family
+ *
+ * @return the column family metadata
+ */
+ public ColumnFamilyMetaData getColumnFamilyMetaData(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle) {
+ return getColumnFamilyMetaData(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Obtains the meta data of the default column family of the DB.
+ *
+ * @return the column family metadata
+ */
+ public ColumnFamilyMetaData getColumnFamilyMetaData() {
+ return getColumnFamilyMetaData(null);
+ }
+
+ /**
+ * ingestExternalFile will load a list of external SST files (1) into the DB.
+ * We will try to find the lowest possible level that the file can fit in, and
+ * ingest the file into this level (2). A file that has a key range
+ * overlapping the memtable key range will require us to flush the memtable
+ * first before ingesting the file.
+ *
+ * (1) External SST files can be created using {@link SstFileWriter}
+ * (2) We will try to ingest the files to the lowest possible level
+ * even if the file compression doesn't match the level compression
+ *
+ * @param filePathList The list of files to ingest
+ * @param ingestExternalFileOptions the options for the ingestion
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void ingestExternalFile(final List<String> filePathList,
+ final IngestExternalFileOptions ingestExternalFileOptions)
+ throws RocksDBException {
+ ingestExternalFile(nativeHandle_, getDefaultColumnFamily().nativeHandle_,
+ filePathList.toArray(new String[0]),
+ filePathList.size(), ingestExternalFileOptions.nativeHandle_);
+ }
+
+ /**
+ * ingestExternalFile will load a list of external SST files (1) into the DB.
+ * We will try to find the lowest possible level that the file can fit in, and
+ * ingest the file into this level (2). A file that has a key range
+ * overlapping the memtable key range will require us to flush the memtable
+ * first before ingesting the file.
+ *
+ * (1) External SST files can be created using {@link SstFileWriter}
+ * (2) We will try to ingest the files to the lowest possible level
+ * even if the file compression doesn't match the level compression
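+ *
+ * <p>A sketch of the flow (assuming {@code db} is open, {@code cfHandle} is
+ * a valid column family handle, and the hypothetical file
+ * {@code /tmp/data1.sst} was produced by an {@link SstFileWriter}):
+ * <pre>{@code
+ *   try (final IngestExternalFileOptions opts =
+ *       new IngestExternalFileOptions()) {
+ *     db.ingestExternalFile(cfHandle, Arrays.asList("/tmp/data1.sst"), opts);
+ *   }
+ * }</pre>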
+ *
+ * @param columnFamilyHandle The column family for the ingested files
+ * @param filePathList The list of files to ingest
+ * @param ingestExternalFileOptions the options for the ingestion
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void ingestExternalFile(final ColumnFamilyHandle columnFamilyHandle,
+ final List<String> filePathList,
+ final IngestExternalFileOptions ingestExternalFileOptions)
+ throws RocksDBException {
+ ingestExternalFile(nativeHandle_, columnFamilyHandle.nativeHandle_,
+ filePathList.toArray(new String[0]),
+ filePathList.size(), ingestExternalFileOptions.nativeHandle_);
+ }
+
+ /**
+ * Verify checksum.
+ *
+ * @throws RocksDBException if the checksum is not valid
+ */
+ public void verifyChecksum() throws RocksDBException {
+ verifyChecksum(nativeHandle_);
+ }
+
+ /**
+ * Gets the handle for the default column family.
+ *
+ * @return The handle of the default column family
+ */
+ public ColumnFamilyHandle getDefaultColumnFamily() {
+ final ColumnFamilyHandle cfHandle = new ColumnFamilyHandle(this,
+ getDefaultColumnFamily(nativeHandle_));
+ cfHandle.disOwnNativeHandle();
+ return cfHandle;
+ }
+
+ /**
+ * Get the properties of all tables.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfAllTables(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ return getPropertiesOfAllTables(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Get the properties of all tables in the default column family.
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfAllTables()
+ throws RocksDBException {
+ return getPropertiesOfAllTables(null);
+ }
+
+ /**
+ * Get the properties of tables in range.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ * @param ranges the ranges over which to get the table properties
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfTablesInRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final List<Range> ranges) throws RocksDBException {
+ return getPropertiesOfTablesInRange(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ toRangeSliceHandles(ranges));
+ }
+
+ /**
+ * Get the properties of tables in range for the default column family.
+ *
+ * @param ranges the ranges over which to get the table properties
+ *
+ * @return the properties
+ *
+ * @throws RocksDBException if an error occurs whilst getting the properties
+ */
+ public Map<String, TableProperties> getPropertiesOfTablesInRange(
+ final List<Range> ranges) throws RocksDBException {
+ return getPropertiesOfTablesInRange(null, ranges);
+ }
+
+ /**
+ * Suggest the range to compact.
+ *
+ * @param columnFamilyHandle the column family handle, or null for the default
+ * column family.
+ *
+ * @return the suggested range.
+ *
+ * @throws RocksDBException if an error occurs whilst suggesting the range
+ */
+ public Range suggestCompactRange(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle)
+ throws RocksDBException {
+ final long[] rangeSliceHandles = suggestCompactRange(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_);
+ return new Range(new Slice(rangeSliceHandles[0]),
+ new Slice(rangeSliceHandles[1]));
+ }
+
+ /**
+ * Suggest the range to compact for the default column family.
+ *
+ * @return the suggested range.
+ *
+ * @throws RocksDBException if an error occurs whilst suggesting the range
+ */
+ public Range suggestCompactRange()
+ throws RocksDBException {
+ return suggestCompactRange(null);
+ }
+
+ /**
+ * Promote L0.
+ *
+ * @param columnFamilyHandle the column family handle,
+ * or null for the default column family.
+ * @param targetLevel the target level for L0
+ *
+ * @throws RocksDBException if an error occurs whilst promoting L0
+ */
+ public void promoteL0(
+ /* @Nullable */ final ColumnFamilyHandle columnFamilyHandle,
+ final int targetLevel) throws RocksDBException {
+ promoteL0(nativeHandle_,
+ columnFamilyHandle == null ? 0 : columnFamilyHandle.nativeHandle_,
+ targetLevel);
+ }
+
+ /**
+ * Promote L0 for the default column family.
+ *
+ * @param targetLevel the target level for L0
+ *
+ * @throws RocksDBException if an error occurs whilst promoting L0
+ */
+ public void promoteL0(final int targetLevel)
+ throws RocksDBException {
+ promoteL0(null, targetLevel);
+ }
+
+ /**
+ * Trace DB operations.
+ *
+ * Use {@link #endTrace()} to stop tracing.
+ *
+ * @param traceOptions the options
+ * @param traceWriter the trace writer
+ *
+ * @throws RocksDBException if an error occurs whilst starting the trace
+ */
+ public void startTrace(final TraceOptions traceOptions,
+ final AbstractTraceWriter traceWriter) throws RocksDBException {
+ startTrace(nativeHandle_, traceOptions.getMaxTraceFileSize(),
+ traceWriter.nativeHandle_);
+ /*
+ * NOTE: {@link #startTrace(long, long, long)} transfers the ownership
+ * from Java to C++, so we must disown the native handle here.
+ */
+ traceWriter.disOwnNativeHandle();
+ }
+
+ /**
+ * Stop tracing DB operations.
+ *
+ * See {@link #startTrace(TraceOptions, AbstractTraceWriter)}
+ *
+ * @throws RocksDBException if an error occurs whilst ending the trace
+ */
+ public void endTrace() throws RocksDBException {
+ endTrace(nativeHandle_);
+ }
+
+ /**
+ * Make the secondary instance catch up with the primary by tailing and
+ * replaying the MANIFEST and WAL of the primary.
+ * Column families created by the primary after the secondary instance starts
+ * will be ignored unless the secondary instance closes and restarts with the
+ * newly created column families.
+ * Column families that exist before the secondary instance starts and are
+ * dropped by the primary afterwards will be marked as dropped. However, as
+ * long as the secondary instance does not delete the corresponding column
+ * family handles, the data of the column family is still accessible to the
+ * secondary.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void tryCatchUpWithPrimary() throws RocksDBException {
+ tryCatchUpWithPrimary(nativeHandle_);
+ }
+
+ /**
+ * Delete files in multiple ranges at once.
+ * Deleting files in many ranges one at a time can be slow; use this API for
+ * better performance in that case.
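+ *
+ * <p>For example (hypothetical UTF-8 keys; assuming an open {@code RocksDB db},
+ * this targets files fully contained in [a, m) and [n, z)):
+ * <pre>{@code
+ *   // UTF_8 is java.nio.charset.StandardCharsets.UTF_8
+ *   db.deleteFilesInRanges(null,
+ *       Arrays.asList("a".getBytes(UTF_8), "m".getBytes(UTF_8),
+ *           "n".getBytes(UTF_8), "z".getBytes(UTF_8)),
+ *       false);
+ * }</pre>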
+ *
+ * @param columnFamily - The column family for operation (null for default)
+ * @param includeEnd - Whether ranges should include end
+ * @param ranges - pairs of ranges (from1, to1, from2, to2, ...)
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public void deleteFilesInRanges(final ColumnFamilyHandle columnFamily,
+ final List<byte[]> ranges, final boolean includeEnd)
+ throws RocksDBException {
+ if (ranges.isEmpty()) {
+ return;
+ }
+ if ((ranges.size() % 2) != 0) {
+ throw new IllegalArgumentException("Ranges size needs to be multiple of 2 "
+ + "(from1, to1, from2, to2, ...), but is " + ranges.size());
+ }
+
+ final byte[][] rangesArray = ranges.toArray(new byte[ranges.size()][]);
+
+ deleteFilesInRanges(nativeHandle_, columnFamily == null ? 0 : columnFamily.nativeHandle_,
+ rangesArray, includeEnd);
+ }
+
+ /**
+ * Static method to destroy the contents of the specified database.
+ * Be very careful using this method.
+ *
+ * @param path the path to the RocksDB database.
+ * @param options {@link org.rocksdb.Options} instance.
+ *
+ * @throws RocksDBException thrown if error happens in underlying
+ * native library.
+ */
+ public static void destroyDB(final String path, final Options options)
+ throws RocksDBException {
+ destroyDB(path, options.nativeHandle_);
+ }
+
+ private /* @Nullable */ long[] toNativeHandleList(
+ /* @Nullable */ final List<? extends RocksObject> objectList) {
+ if (objectList == null) {
+ return null;
+ }
+ final int len = objectList.size();
+ final long[] handleList = new long[len];
+ for (int i = 0; i < len; i++) {
+ handleList[i] = objectList.get(i).nativeHandle_;
+ }
+ return handleList;
+ }
+
+ private static long[] toRangeSliceHandles(final List<Range> ranges) {
+ final long[] rangeSliceHandles = new long[ranges.size() * 2];
+ for (int i = 0, j = 0; i < ranges.size(); i++) {
+ final Range range = ranges.get(i);
+ rangeSliceHandles[j++] = range.start.getNativeHandle();
+ rangeSliceHandles[j++] = range.limit.getNativeHandle();
+ }
+ return rangeSliceHandles;
+ }
+
+ protected void storeOptionsInstance(DBOptionsInterface options) {
+ options_ = options;
+ }
+
+ private static void checkBounds(int offset, int len, int size) {
+ if ((offset | len | (offset + len) | (size - (offset + len))) < 0) {
+ throw new IndexOutOfBoundsException(String.format("offset(%d), len(%d), size(%d)", offset, len, size));
+ }
+ }
+
+ private static int computeCapacityHint(final int estimatedNumberOfItems) {
+ // Default load factor for HashMap is 0.75, so N * 1.5 will be at the load
+ // limit. We add +1 for a buffer.
+ return (int)Math.ceil(estimatedNumberOfItems * 1.5 + 1.0); + } + + // native methods + private native static long open(final long optionsHandle, + final String path) throws RocksDBException; + + /** + * @param optionsHandle Native handle pointing to an Options object + * @param path The directory path for the database files + * @param columnFamilyNames An array of column family names + * @param columnFamilyOptions An array of native handles pointing to + * ColumnFamilyOptions objects + * + * @return An array of native handles, [0] is the handle of the RocksDB object + * [1..1+n] are handles of the ColumnFamilyReferences + * + * @throws RocksDBException thrown if the database could not be opened + */ + private native static long[] open(final long optionsHandle, + final String path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions) throws RocksDBException; + + private native static long openROnly(final long optionsHandle, final String path, + final boolean errorIfWalFileExists) throws RocksDBException; + + /** + * @param optionsHandle Native handle pointing to an Options object + * @param path The directory path for the database files + * @param columnFamilyNames An array of column family names + * @param columnFamilyOptions An array of native handles pointing to + * ColumnFamilyOptions objects + * + * @return An array of native handles, [0] is the handle of the RocksDB object + * [1..1+n] are handles of the ColumnFamilyReferences + * + * @throws RocksDBException thrown if the database could not be opened + */ + private native static long[] openROnly(final long optionsHandle, final String path, + final byte[][] columnFamilyNames, final long[] columnFamilyOptions, + final boolean errorIfWalFileExists) throws RocksDBException; + + private native static long openAsSecondary(final long optionsHandle, final String path, + final String secondaryPath) throws RocksDBException; + + private native static long[] openAsSecondary(final long optionsHandle, final String path, + final String secondaryPath, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions) throws RocksDBException; + + @Override protected native void disposeInternal(final long handle); + + private native static void closeDatabase(final long handle) + throws RocksDBException; + private native static byte[][] listColumnFamilies(final long optionsHandle, + final String path) throws RocksDBException; + private native long createColumnFamily(final long handle, + final byte[] columnFamilyName, final int columnFamilyNamelen, + final long columnFamilyOptions) throws RocksDBException; + private native long[] createColumnFamilies(final long handle, + final long columnFamilyOptionsHandle, final byte[][] columnFamilyNames) + throws RocksDBException; + private native long[] createColumnFamilies(final long handle, + final long columnFamilyOptionsHandles[], final byte[][] columnFamilyNames) + throws RocksDBException; + private native void dropColumnFamily( + final long handle, final long cfHandle) throws RocksDBException; + private native void dropColumnFamilies(final long handle, + final long[] cfHandles) throws RocksDBException; + private native void put(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, int valueLength) throws RocksDBException; + private native void put(final long handle, final byte[] key, final int keyOffset, + final int keyLength, final byte[] value, final int valueOffset, + final int valueLength, final long cfHandle) 
throws RocksDBException; + private native void put(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength) + throws RocksDBException; + private native void put(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength, + final long cfHandle) throws RocksDBException; + private native void delete(final long handle, final byte[] key, + final int keyOffset, final int keyLength) throws RocksDBException; + private native void delete(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final long cfHandle) + throws RocksDBException; + private native void delete(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength) + throws RocksDBException; + private native void delete(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final long cfHandle) throws RocksDBException; + private native void singleDelete( + final long handle, final byte[] key, final int keyLen) + throws RocksDBException; + private native void singleDelete( + final long handle, final byte[] key, final int keyLen, + final long cfHandle) throws RocksDBException; + private native void singleDelete( + final long handle, final long writeOptHandle, final byte[] key, + final int keyLen) throws RocksDBException; + private native void singleDelete( + final long handle, final long writeOptHandle, + final byte[] key, final int keyLen, final long cfHandle) + throws RocksDBException; + private native void deleteRange(final long handle, final byte[] beginKey, + final int beginKeyOffset, final int beginKeyLength, final byte[] endKey, + final int endKeyOffset, final int endKeyLength) throws RocksDBException; + private native void deleteRange(final long handle, final byte[] beginKey, + final int beginKeyOffset, final int beginKeyLength, final byte[] endKey, + final int endKeyOffset, final int endKeyLength, final long cfHandle) + throws RocksDBException; + private native void deleteRange(final long handle, final long writeOptHandle, + final byte[] beginKey, final int beginKeyOffset, final int beginKeyLength, + final byte[] endKey, final int endKeyOffset, final int endKeyLength) + throws RocksDBException; + private native void deleteRange( + final long handle, final long writeOptHandle, final byte[] beginKey, + final int beginKeyOffset, final int beginKeyLength, final byte[] endKey, + final int endKeyOffset, final int endKeyLength, final long cfHandle) + throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, final int valueLength) throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, final int valueLength, final long cfHandle) + throws RocksDBException; + private native void merge(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength) + throws RocksDBException; + private native void merge(final long handle, final long writeOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final 
int valueOffset, final int valueLength, + final long cfHandle) throws RocksDBException; + private native void write0(final long handle, final long writeOptHandle, + final long wbHandle) throws RocksDBException; + private native void write1(final long handle, final long writeOptHandle, + final long wbwiHandle) throws RocksDBException; + private native int get(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final byte[] value, + final int valueOffset, final int valueLength) throws RocksDBException; + private native int get(final long handle, final byte[] key, + final int keyOffset, final int keyLength, byte[] value, + final int valueOffset, final int valueLength, final long cfHandle) + throws RocksDBException; + private native int get(final long handle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength) + throws RocksDBException; + private native int get(final long handle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength, + final byte[] value, final int valueOffset, final int valueLength, + final long cfHandle) throws RocksDBException; + private native byte[] get(final long handle, byte[] key, final int keyOffset, + final int keyLength) throws RocksDBException; + private native byte[] get(final long handle, final byte[] key, + final int keyOffset, final int keyLength, final long cfHandle) + throws RocksDBException; + private native byte[] get(final long handle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength) + throws RocksDBException; + private native byte[] get(final long handle, + final long readOptHandle, final byte[] key, final int keyOffset, + final int keyLength, final long cfHandle) throws RocksDBException; + private native byte[][] multiGet(final long dbHandle, final byte[][] keys, + final int[] keyOffsets, final int[] keyLengths); + private native byte[][] multiGet(final long dbHandle, final byte[][] keys, + final int[] keyOffsets, final int[] keyLengths, + final long[] columnFamilyHandles); + private native byte[][] multiGet(final long dbHandle, final long rOptHandle, + final byte[][] keys, final int[] keyOffsets, final int[] keyLengths); + private native byte[][] multiGet(final long dbHandle, final long rOptHandle, + final byte[][] keys, final int[] keyOffsets, final int[] keyLengths, + final long[] columnFamilyHandles); + + private native void multiGet(final long dbHandle, final long rOptHandle, + final long[] columnFamilyHandles, final ByteBuffer[] keysArray, final int[] keyOffsets, + final int[] keyLengths, final ByteBuffer[] valuesArray, final int[] valuesSizeArray, + final Status[] statusArray); + + private native boolean keyMayExist( + final long handle, final long cfHandle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength); + private native byte[][] keyMayExistFoundValue( + final long handle, final long cfHandle, final long readOptHandle, + final byte[] key, final int keyOffset, final int keyLength); + private native void putDirect(long handle, long writeOptHandle, ByteBuffer key, int keyOffset, + int keyLength, ByteBuffer value, int valueOffset, int valueLength, long cfHandle) + throws RocksDBException; + private native long iterator(final long handle); + private native long iterator(final long handle, final long readOptHandle); + private native long iteratorCF(final long handle, final long cfHandle); + private native long 
iteratorCF(final long handle, final long cfHandle,
+ final long readOptHandle);
+ private native long[] iterators(final long handle,
+ final long[] columnFamilyHandles, final long readOptHandle)
+ throws RocksDBException;
+ private native long getSnapshot(final long nativeHandle);
+ private native void releaseSnapshot(
+ final long nativeHandle, final long snapshotHandle);
+ private native String getProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
+ throws RocksDBException;
+ private native Map<String, String> getMapProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
+ throws RocksDBException;
+ private native int getDirect(long handle, long readOptHandle, ByteBuffer key, int keyOffset,
+ int keyLength, ByteBuffer value, int valueOffset, int valueLength, long cfHandle)
+ throws RocksDBException;
+ private native boolean keyMayExistDirect(final long handle, final long cfHandle,
+ final long readOptHandle, final ByteBuffer key, final int keyOffset, final int keyLength);
+ private native int[] keyMayExistDirectFoundValue(final long handle, final long cfHandle,
+ final long readOptHandle, final ByteBuffer key, final int keyOffset, final int keyLength,
+ final ByteBuffer value, final int valueOffset, final int valueLength);
+ private native void deleteDirect(long handle, long optHandle, ByteBuffer key, int keyOffset,
+ int keyLength, long cfHandle) throws RocksDBException;
+ private native long getLongProperty(final long nativeHandle,
+ final long cfHandle, final String property, final int propertyLength)
+ throws RocksDBException;
+ private native void resetStats(final long nativeHandle)
+ throws RocksDBException;
+ private native long getAggregatedLongProperty(final long nativeHandle,
+ final String property, int propertyLength) throws RocksDBException;
+ private native long[] getApproximateSizes(final long nativeHandle,
+ final long columnFamilyHandle, final long[] rangeSliceHandles,
+ final byte includeFlags);
+ private native long[] getApproximateMemTableStats(final long nativeHandle,
+ final long columnFamilyHandle, final long rangeStartSliceHandle,
+ final long rangeLimitSliceHandle);
+ private native void compactRange(final long handle,
+ /* @Nullable */ final byte[] begin, final int beginLen,
+ /* @Nullable */ final byte[] end, final int endLen,
+ final long compactRangeOptHandle, final long cfHandle)
+ throws RocksDBException;
+ private native void setOptions(final long handle, final long cfHandle,
+ final String[] keys, final String[] values) throws RocksDBException;
+ private native String getOptions(final long handle, final long cfHandle);
+ private native void setDBOptions(final long handle,
+ final String[] keys, final String[] values) throws RocksDBException;
+ private native String getDBOptions(final long handle);
+ private native String[] compactFiles(final long handle,
+ final long compactionOptionsHandle,
+ final long columnFamilyHandle,
+ final String[] inputFileNames,
+ final int outputLevel,
+ final int outputPathId,
+ final long compactionJobInfoHandle) throws RocksDBException;
+ private native void cancelAllBackgroundWork(final long handle,
+ final boolean wait);
+ private native void pauseBackgroundWork(final long handle)
+ throws RocksDBException;
+ private native void continueBackgroundWork(final long handle)
+ throws RocksDBException;
+ private native void enableAutoCompaction(final long handle,
+ final long[] columnFamilyHandles) throws RocksDBException;
+ private native int numberLevels(final long handle,
+ final long columnFamilyHandle);
+ private native int maxMemCompactionLevel(final long handle,
+ final long columnFamilyHandle);
+ private native int level0StopWriteTrigger(final long handle,
+ final long columnFamilyHandle);
+ private native String getName(final long handle);
+ private native long getEnv(final long handle);
+ private native void flush(final long handle, final long flushOptHandle,
+ /* @Nullable */ final long[] cfHandles) throws RocksDBException;
+ private native void flushWal(final long handle, final boolean sync)
+ throws RocksDBException;
+ private native void syncWal(final long handle) throws RocksDBException;
+ private native long getLatestSequenceNumber(final long handle);
+ private native void disableFileDeletions(long handle) throws RocksDBException;
+ private native void enableFileDeletions(long handle, boolean force)
+ throws RocksDBException;
+ private native String[] getLiveFiles(final long handle,
+ final boolean flushMemtable) throws RocksDBException;
+ private native LogFile[] getSortedWalFiles(final long handle)
+ throws RocksDBException;
+ private native long getUpdatesSince(final long handle,
+ final long sequenceNumber) throws RocksDBException;
+ private native void deleteFile(final long handle, final String name)
+ throws RocksDBException;
+ private native LiveFileMetaData[] getLiveFilesMetaData(final long handle);
+ private native ColumnFamilyMetaData getColumnFamilyMetaData(
+ final long handle, final long columnFamilyHandle);
+ private native void ingestExternalFile(final long handle,
+ final long columnFamilyHandle, final String[] filePathList,
+ final int filePathListLen, final long ingestExternalFileOptionsHandle)
+ throws RocksDBException;
+ private native void verifyChecksum(final long handle) throws RocksDBException;
+ private native long getDefaultColumnFamily(final long handle);
+ private native Map<String, TableProperties> getPropertiesOfAllTables(
+ final long handle, final long columnFamilyHandle) throws RocksDBException;
+ private native Map<String, TableProperties> getPropertiesOfTablesInRange(
+ final long handle, final long columnFamilyHandle,
+ final long[] rangeSliceHandles);
+ private native long[] suggestCompactRange(final long handle,
+ final long columnFamilyHandle) throws RocksDBException;
+ private native void promoteL0(final long handle,
+ final long columnFamilyHandle, final int targetLevel)
+ throws RocksDBException;
+ private native void startTrace(final long handle, final long maxTraceFileSize,
+ final long traceWriterHandle) throws RocksDBException;
+ private native void endTrace(final long handle) throws RocksDBException;
+ private native void tryCatchUpWithPrimary(final long handle) throws RocksDBException;
+ private native void deleteFilesInRanges(long handle, long cfHandle, final byte[][] ranges,
+ boolean includeEnd) throws RocksDBException;
+
+ private native static void destroyDB(final String path,
+ final long optionsHandle) throws RocksDBException;
+
+ private native static int version();
+
+ protected DBOptionsInterface options_;
+ private static Version version;
+
+ public static class Version {
+ private final byte major;
+ private final byte minor;
+ private final byte patch;
+
+ public Version(final byte major, final byte minor, final byte patch) {
+ this.major = major;
+ this.minor = minor;
+ this.patch = patch;
+ }
+
+ public int getMajor() {
+ return major;
+ }
+
+ public int getMinor() {
+ return minor;
+ }
+
+ public int getPatch() {
+ return patch;
+ }
+
+ @Override
+ public String toString() {
getMajor() + "." + getMinor() + "." + getPatch(); + } + + private static Version fromEncodedVersion(int encodedVersion) { + final byte patch = (byte) (encodedVersion & 0xff); + encodedVersion >>= 8; + final byte minor = (byte) (encodedVersion & 0xff); + encodedVersion >>= 8; + final byte major = (byte) (encodedVersion & 0xff); + + return new Version(major, minor, patch); + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java new file mode 100644 index 000000000..8b035f458 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java @@ -0,0 +1,44 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * A RocksDBException encapsulates the error of an operation. This exception + * type is used to describe an internal error from the c++ rocksdb library. + */ +public class RocksDBException extends Exception { + + /* @Nullable */ private final Status status; + + /** + * The private construct used by a set of public static factory method. + * + * @param msg the specified error message. + */ + public RocksDBException(final String msg) { + this(msg, null); + } + + public RocksDBException(final String msg, final Status status) { + super(msg); + this.status = status; + } + + public RocksDBException(final Status status) { + super(status.getState() != null ? status.getState() + : status.getCodeString()); + this.status = status; + } + + /** + * Get the status returned from RocksDB + * + * @return The status reported by RocksDB, or null if no status is available + */ + public Status getStatus() { + return status; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java new file mode 100644 index 000000000..b3681d77d --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + *

<p>A RocksEnv is an interface used by the rocksdb implementation to access + * operating system functionality like the filesystem etc.</p> + * + * <p>All Env implementations are safe for concurrent access from + * multiple threads without any external synchronization.</p>

+ */ +public class RocksEnv extends Env { + + /** + *

<p>Package-private constructor that uses the specified native handle + * to construct a RocksEnv.</p> + * + * <p>Note that the ownership of the input handle + * belongs to the caller, and the newly created RocksEnv will not take + * the ownership of the input handle. As a result, calling + * {@code dispose()} of the created RocksEnv will be a no-op.</p>
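A minimal sketch of how this ownership rule surfaces in user code, assuming the public {@code Env.getDefault()} entry point and the {@code Priority} overload of {@code setBackgroundThreads} (neither is defined in this hunk):

    import org.rocksdb.Env;
    import org.rocksdb.Priority;
    import org.rocksdb.RocksDB;

    public class EnvSketch {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        // The default Env is owned by the C++ side; close() will not free it,
        // matching the constructor contract described above.
        final Env env = Env.getDefault();
        env.setBackgroundThreads(4, Priority.LOW);   // compaction pool
        env.setBackgroundThreads(2, Priority.HIGH);  // flush pool
      }
    }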

+ */ + RocksEnv(final long handle) { + super(handle); + } + + @Override + protected native final void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java new file mode 100644 index 000000000..20e56d2eb --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java @@ -0,0 +1,140 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + *

<p>An iterator that yields a sequence of key/value pairs from a source. + * Multiple implementations are provided by this library. + * In particular, iterators are provided + * to access the contents of a Table or a DB.</p> + * + * <p>Multiple threads can invoke const methods on a RocksIterator without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same RocksIterator must use + * external synchronization.</p>
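For orientation, a minimal iteration sketch over a database at an illustrative path, ending with the {@code status()} check that surfaces any error hit while scanning:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.RocksIterator;

    public class IterateSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Options options = new Options().setCreateIfMissing(true);
             final RocksDB db = RocksDB.open(options, "/tmp/iter-example");
             final RocksIterator it = db.newIterator()) {
          for (it.seekToFirst(); it.isValid(); it.next()) {
            System.out.printf("%s -> %s%n", new String(it.key()), new String(it.value()));
          }
          it.status(); // throws RocksDBException if iteration failed
        }
      }
    }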

+ * + * @see org.rocksdb.RocksObject + */ +public class RocksIterator extends AbstractRocksIterator { + protected RocksIterator(final RocksDB rocksDB, final long nativeHandle) { + super(rocksDB, nativeHandle); + } + + /** + *

<p>Return the key for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>

+ * + * @return key for the current entry. + */ + public byte[] key() { + assert(isOwningHandle()); + return key0(nativeHandle_); + } + + /** + *

<p>Return the key for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>
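A short sketch of the partial-result contract just described, for a valid, positioned iterator (the buffer sizes are arbitrary):

    import java.nio.ByteBuffer;
    import org.rocksdb.RocksIterator;

    final class KeyReadSketch {
      // Copies the current key into a direct buffer, growing the buffer when
      // the reported size shows the first attempt was truncated.
      static ByteBuffer readKey(final RocksIterator it) {
        ByteBuffer buf = ByteBuffer.allocateDirect(32);
        final int size = it.key(buf);
        if (size > buf.capacity()) { // partial result: retry with enough room
          buf = ByteBuffer.allocateDirect(size);
          it.key(buf);
        }
        return buf; // the key bytes lie between buf.position() and buf.limit()
      }
    }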

+ * + * @param key the out-value to receive the retrieved key. + * It is using position and limit. Limit is set according to key size. + * Supports direct buffer only. + * @return The size of the actual key. If the return key is greater than the + * length of {@code key}, then it indicates that the size of the + * input buffer {@code key} is insufficient and partial result will + * be returned. + */ + public int key(final ByteBuffer key) { + assert isOwningHandle(); + final int result; + if (key.isDirect()) { + result = keyDirect0(nativeHandle_, key, key.position(), key.remaining()); + } else { + assert key.hasArray(); + result = keyByteArray0( + nativeHandle_, key.array(), key.arrayOffset() + key.position(), key.remaining()); + } + key.limit(Math.min(key.position() + result, key.limit())); + return result; + } + + /** + *

<p>Return the value for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: !AtEnd() &amp;&amp; !AtStart()</p>

+ * @return value for the current entry. + */ + public byte[] value() { + assert(isOwningHandle()); + return value0(nativeHandle_); + } + + /** + *

<p>Return the value for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>

+ * + * @param value the out-value to receive the retrieved value. + * It is using position and limit. Limit is set according to value size. + * Supports direct buffer only. + * @return The size of the actual value. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and partial result will + * be returned. + */ + public int value(final ByteBuffer value) { + assert isOwningHandle(); + final int result; + if (value.isDirect()) { + result = valueDirect0(nativeHandle_, value, value.position(), value.remaining()); + } else { + assert value.hasArray(); + result = valueByteArray0( + nativeHandle_, value.array(), value.arrayOffset() + value.position(), value.remaining()); + } + value.limit(Math.min(value.position() + result, value.limit())); + return result; + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native boolean isValid0(long handle); + @Override final native void seekToFirst0(long handle); + @Override final native void seekToLast0(long handle); + @Override final native void next0(long handle); + @Override final native void prev0(long handle); + @Override final native void refresh0(long handle); + @Override final native void seek0(long handle, byte[] target, int targetLen); + @Override final native void seekForPrev0(long handle, byte[] target, int targetLen); + @Override + final native void seekDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen); + @Override + final native void seekByteArray0(long handle, byte[] target, int targetOffset, int targetLen); + @Override + final native void seekForPrevDirect0( + long handle, ByteBuffer target, int targetOffset, int targetLen); + @Override + final native void seekForPrevByteArray0( + long handle, byte[] target, int targetOffset, int targetLen); + @Override final native void status0(long handle) throws RocksDBException; + + private native byte[] key0(long handle); + private native byte[] value0(long handle); + private native int keyDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); + private native int keyByteArray0(long handle, byte[] array, int arrayOffset, int arrayLen); + private native int valueDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); + private native int valueByteArray0(long handle, byte[] array, int arrayOffset, int arrayLen); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java new file mode 100644 index 000000000..819c21c2c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java @@ -0,0 +1,127 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + *

<p>Defines the interface for an Iterator which provides + * access to data one entry at a time. Multiple implementations + * are provided by this library. In particular, iterators are provided + * to access the contents of a DB and Write Batch.</p> + * + * <p>Multiple threads can invoke const methods on a RocksIterator without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same RocksIterator must use + * external synchronization.</p>

+ * + * @see org.rocksdb.RocksObject + */ +public interface RocksIteratorInterface { + + /** + *

<p>An iterator is either positioned at an entry, or + * not valid. This method returns true if the iterator is valid.</p>

+ * + * @return true if iterator is valid. + */ + boolean isValid(); + + /** + *

<p>Position at the first entry in the source. The iterator is Valid() + * after this call if the source is not empty.</p>

+ */ + void seekToFirst(); + + /** + *

<p>Position at the last entry in the source. The iterator is + * valid after this call if the source is not empty.</p>

+ */ + void seekToLast(); + + /** + *

<p>Position at the first entry in the source whose key is at or + * past target.</p> + * + * <p>The iterator is valid after this call if the source contains + * a key that comes at or past target.</p>

+ * + * @param target byte array describing a key or a + * key prefix to seek for. + */ + void seek(byte[] target); + + /** + *

<p>Position at the first entry in the source whose key is at or + * before target.</p> + * + * <p>The iterator is valid after this call if the source contains + * a key that comes at or before target.</p>

+ * + * @param target byte array describing a key or a + * key prefix to seek for. + */ + void seekForPrev(byte[] target); + + /** + *

<p>Position at the first entry in the source whose key is at or + * past target.</p> + * + * <p>The iterator is valid after this call if the source contains + * a key that comes at or past target.</p>

+ * + * @param target byte array describing a key or a + * key prefix to seek for. Supports direct buffer only. + */ + void seek(ByteBuffer target); + + /** + *

<p>Position at the last key that is less than or equal to the target key.</p> + * + * <p>The iterator is valid after this call if the source contains + * a key that comes at or before target.</p>
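To make the seek family concrete, a sketch of landing positions for a database holding only the keys "a" and "c" (UTF-8 bytes; the helper method and database are assumed):

    import static java.nio.charset.StandardCharsets.UTF_8;

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksIterator;

    final class SeekSketch {
      static void demo(final RocksDB db) { // db contains exactly {"a", "c"}
        try (final RocksIterator it = db.newIterator()) {
          it.seek("b".getBytes(UTF_8));        // lands on "c": first key at or past "b"
          it.seekForPrev("b".getBytes(UTF_8)); // lands on "a": last key at or before "b"
          it.seek("z".getBytes(UTF_8));        // becomes invalid: no key at or past "z"
          it.seekForPrev("z".getBytes(UTF_8)); // lands on "c"
        }
      }
    }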

+ * + * @param target byte array describing a key or a + * key prefix to seek for. Supports direct buffer only. + */ + void seekForPrev(ByteBuffer target); + + /** + *

<p>Moves to the next entry in the source. After this call, Valid() is + * true if the iterator was not positioned at the last entry in the source.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>

+ */ + void next(); + + /** + *

<p>Moves to the previous entry in the source. After this call, Valid() is + * true if the iterator was not positioned at the first entry in the source.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>

+ */ + void prev(); + + /** + *

<p>If an error has occurred, return it. Else return an ok status. + * If non-blocking IO is requested and this operation cannot be + * satisfied without doing some IO, then this returns Status::Incomplete().</p>

+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + void status() throws RocksDBException; + + /** + *

<p>If supported, renew the iterator to represent the latest state. The iterator will be + * invalidated after the call. Not supported if {@link ReadOptions#setSnapshot(Snapshot)} was + * specified when creating the iterator.</p>

+ * + * @throws RocksDBException thrown if the operation is not supported or an error happens in the + * underlying native library + */ + void refresh() throws RocksDBException; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java new file mode 100644 index 000000000..39a6f6e1c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Memory environment. + */ +//TODO(AR) rename to MemEnv +public class RocksMemEnv extends Env { + + /** + *

<p>Creates a new environment that stores its data + * in memory and delegates all non-file-storage tasks to + * {@code baseEnv}.</p> + * + * <p>The caller must delete the result when it is + * no longer needed.</p>
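A minimal sketch of a throwaway in-memory database built on this Env; the path is only a label, nothing touches disk:

    import org.rocksdb.Env;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.RocksMemEnv;

    public class MemEnvSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final Env memEnv = new RocksMemEnv(Env.getDefault());
             final Options options = new Options().setCreateIfMissing(true).setEnv(memEnv);
             final RocksDB db = RocksDB.open(options, "/in-memory-example")) {
          db.put("k".getBytes(), "v".getBytes());
          System.out.println(new String(db.get("k".getBytes())));
        }
      }
    }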

+ * + * @param baseEnv the base environment, + * must remain live while the result is in use. + */ + public RocksMemEnv(final Env baseEnv) { + super(createMemEnv(baseEnv.nativeHandle_)); + } + + private static native long createMemEnv(final long baseEnvHandle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java new file mode 100644 index 000000000..e92289dc0 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -0,0 +1,87 @@ +// Copyright (c) 2016, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * RocksMutableObject is an implementation of {@link AbstractNativeReference} + * whose reference to the underlying native C++ object can change. + * + *

<p>The use of {@code RocksMutableObject} should be kept to a minimum, as it + * has synchronization overheads and introduces complexity. Instead it is + * recommended to use {@link RocksObject} where possible.</p>

+ */ +public abstract class RocksMutableObject extends AbstractNativeReference { + + /** + * An mutable reference to the value of the C++ pointer pointing to some + * underlying native RocksDB C++ object. + */ + private long nativeHandle_; + private boolean owningHandle_; + + protected RocksMutableObject() { + } + + protected RocksMutableObject(final long nativeHandle) { + this.nativeHandle_ = nativeHandle; + this.owningHandle_ = true; + } + + /** + * Closes the existing handle, and changes the handle to the new handle + * + * @param newNativeHandle The C++ pointer to the new native object + * @param owningNativeHandle true if we own the new native object + */ + public synchronized void resetNativeHandle(final long newNativeHandle, + final boolean owningNativeHandle) { + close(); + setNativeHandle(newNativeHandle, owningNativeHandle); + } + + /** + * Sets the handle (C++ pointer) of the underlying C++ native object + * + * @param nativeHandle The C++ pointer to the native object + * @param owningNativeHandle true if we own the native object + */ + public synchronized void setNativeHandle(final long nativeHandle, + final boolean owningNativeHandle) { + this.nativeHandle_ = nativeHandle; + this.owningHandle_ = owningNativeHandle; + } + + @Override + protected synchronized boolean isOwningHandle() { + return this.owningHandle_; + } + + /** + * Gets the value of the C++ pointer pointing to the underlying + * native C++ object + * + * @return the pointer value for the native object + */ + protected synchronized long getNativeHandle() { + assert (this.nativeHandle_ != 0); + return this.nativeHandle_; + } + + @Override + public synchronized final void close() { + if (isOwningHandle()) { + disposeInternal(); + this.owningHandle_ = false; + this.nativeHandle_ = 0; + } + } + + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + protected abstract void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java b/src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java new file mode 100644 index 000000000..f07e1018a --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java @@ -0,0 +1,45 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * RocksObject is an implementation of {@link AbstractNativeReference} which + * has an immutable and therefore thread-safe reference to the underlying + * native C++ RocksDB object. + *

<p> + * RocksObject is the base-class of almost all RocksDB classes that have a + * pointer to some underlying native C++ {@code rocksdb} object.</p> + * <p> + * The use of {@code RocksObject} should always be preferred over + * {@link RocksMutableObject}.</p>
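Because AbstractNativeReference implements {@code AutoCloseable} (an assumption carried over from the base class earlier in this patch), the idiomatic way to honour this ownership model is try-with-resources; a brief sketch:

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;

    public class OwnershipSketch {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        // The native Options object is freed exactly once, when the block exits.
        try (final Options options = new Options()) {
          options.setCreateIfMissing(true);
        }
      }
    }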

+ */ +public abstract class RocksObject extends AbstractImmutableNativeReference { + + /** + * An immutable reference to the value of the C++ pointer pointing to some + * underlying native RocksDB C++ object. + */ + protected final long nativeHandle_; + + protected RocksObject(final long nativeHandle) { + super(true); + this.nativeHandle_ = nativeHandle; + } + + /** + * Deletes underlying C++ object pointer. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + protected abstract void disposeInternal(final long handle); + + public long getNativeHandle() { + return nativeHandle_; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SanityLevel.java b/src/rocksdb/java/src/main/java/org/rocksdb/SanityLevel.java new file mode 100644 index 000000000..30568c363 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SanityLevel.java @@ -0,0 +1,47 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum SanityLevel { + NONE((byte) 0x0), + LOOSELY_COMPATIBLE((byte) 0x1), + EXACT_MATCH((byte) 0xFF); + + private final byte value; + + SanityLevel(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + byte getValue() { + return value; + } + + /** + * Get the SanityLevel from the internal representation value. + * + * @param value the internal representation value. + * + * @return the SanityLevel + * + * @throws IllegalArgumentException if the value does not match a + * SanityLevel + */ + static SanityLevel fromValue(final byte value) throws IllegalArgumentException { + for (final SanityLevel level : SanityLevel.values()) { + if (level.value == value) { + return level; + } + } + throw new IllegalArgumentException("Unknown value for SanityLevel: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java b/src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java new file mode 100644 index 000000000..fe3c2dd05 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SizeApproximationFlag.java @@ -0,0 +1,31 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +import java.util.List; + +/** + * Flags for + * {@link RocksDB#getApproximateSizes(ColumnFamilyHandle, List, SizeApproximationFlag...)} + * that specify whether memtable stats should be included, + * or file stats approximation or both. + */ +public enum SizeApproximationFlag { + NONE((byte)0x0), + INCLUDE_MEMTABLES((byte)0x1), + INCLUDE_FILES((byte)0x2); + + private final byte value; + + SizeApproximationFlag(final byte value) { + this.value = value; + } + + /** + * Get the internal byte representation. + * + * @return the internal representation. + */ + byte getValue() { + return value; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java new file mode 100644 index 000000000..e2c1b97d8 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java @@ -0,0 +1,51 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+package org.rocksdb; + +/** + * The config for skip-list memtable representation. + */ +public class SkipListMemTableConfig extends MemTableConfig { + + public static final long DEFAULT_LOOKAHEAD = 0; + + /** + * SkipListMemTableConfig constructor + */ + public SkipListMemTableConfig() { + lookahead_ = DEFAULT_LOOKAHEAD; + } + + /** + * Sets lookahead for SkipList + * + * @param lookahead If non-zero, each iterator's seek operation + * will start the search from the previously visited record + * (doing at most 'lookahead' steps). This is an + * optimization for the access pattern including many + * seeks with consecutive keys. + * @return the current instance of SkipListMemTableConfig + */ + public SkipListMemTableConfig setLookahead(final long lookahead) { + lookahead_ = lookahead; + return this; + } + + /** + * Returns the currently set lookahead value. + * + * @return lookahead value + */ + public long lookahead() { + return lookahead_; + } + + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle0(lookahead_); + } + + private native long newMemTableFactoryHandle0(long lookahead) + throws IllegalArgumentException; + + private long lookahead_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Slice.java b/src/rocksdb/java/src/main/java/org/rocksdb/Slice.java new file mode 100644 index 000000000..50d9f7652 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Slice.java @@ -0,0 +1,136 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + *

<p>Base class for slices which will receive + * byte[] based access to the underlying data.</p> + * + * <p>byte[] backed slices typically perform better with + * small keys and values. When using larger keys and + * values consider using {@link org.rocksdb.DirectSlice}.</p>
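A small sketch of the byte[]-backed flavour (both constructors below copy their input into native memory, and both objects are released via try-with-resources):

    import org.rocksdb.RocksDB;
    import org.rocksdb.Slice;

    public class SliceSketch {
      public static void main(final String[] args) {
        RocksDB.loadLibrary();
        try (final Slice key = new Slice("user:42");
             final Slice value = new Slice(new byte[] {1, 2, 3})) {
          // hand key/value to APIs accepting slices, e.g. SstFileWriter#put
          System.out.println(key.toString());
        }
      }
    }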

+ */ +public class Slice extends AbstractSlice { + + /** + * Indicates whether we have to free the memory pointed to by the Slice + */ + private volatile boolean cleared; + private volatile long internalBufferOffset = 0; + + /** + *

<p>Called from JNI to construct a new Java Slice + * without an underlying C++ object set + * at creation time.</p> + * + * <p>Note: You should be aware that + * {@link org.rocksdb.RocksObject#disOwnNativeHandle()} is intentionally + * called from the default Slice constructor, and that it is marked as + * private. This is so that developers cannot construct their own default + * Slice objects (at present). As developers cannot construct their own + * Slice objects through this, they are not creating underlying C++ Slice + * objects, and so there is nothing to free (dispose) from Java.</p>

+ */ + @SuppressWarnings("unused") + private Slice() { + super(); + } + + /** + *

<p>Package-private Slice constructor which is used to construct + * Slice instances from the C++ side. As the reference to this + * object is also managed from the C++ side, the handle will be disowned.</p>

+ * + * @param nativeHandle address of native instance. + */ + Slice(final long nativeHandle) { + this(nativeHandle, false); + } + + /** + *

<p>Package-private Slice constructor which is used to construct + * Slice instances using a handle.</p>

+ * + * @param nativeHandle address of native instance. + * @param owningNativeHandle true if the Java side owns the memory pointed to + * by this reference, false if ownership belongs to the C++ side + */ + Slice(final long nativeHandle, final boolean owningNativeHandle) { + super(); + setNativeHandle(nativeHandle, owningNativeHandle); + } + + /** + *

<p>Constructs a slice where the data is taken from + * a String.</p>

+ * + * @param str String value. + */ + public Slice(final String str) { + super(createNewSliceFromString(str)); + } + + /** + *

<p>Constructs a slice where the data is a copy of + * the byte array from a specific offset.</p>

+ * + * @param data byte array. + * @param offset offset within the byte array. + */ + public Slice(final byte[] data, final int offset) { + super(createNewSlice0(data, offset)); + } + + /** + *

<p>Constructs a slice where the data is a copy of + * the byte array.</p>

+ * + * @param data byte array. + */ + public Slice(final byte[] data) { + super(createNewSlice1(data)); + } + + @Override + public void clear() { + clear0(getNativeHandle(), !cleared, internalBufferOffset); + cleared = true; + } + + @Override + public void removePrefix(final int n) { + removePrefix0(getNativeHandle(), n); + this.internalBufferOffset += n; + } + + /** + *

<p>Deletes underlying C++ slice pointer + * and any buffered data.</p> + * + * <p> + * Note that this function should be called only after all + * RocksDB instances referencing the slice are closed. + * Otherwise undefined behavior will occur.</p>

+ */ + @Override + protected void disposeInternal() { + final long nativeHandle = getNativeHandle(); + if(!cleared) { + disposeInternalBuf(nativeHandle, internalBufferOffset); + } + super.disposeInternal(nativeHandle); + } + + @Override protected final native byte[] data0(long handle); + private native static long createNewSlice0(final byte[] data, + final int length); + private native static long createNewSlice1(final byte[] data); + private native void clear0(long handle, boolean internalBuffer, + long internalBufferOffset); + private native void removePrefix0(long handle, int length); + private native void disposeInternalBuf(final long handle, + long internalBufferOffset); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java b/src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java new file mode 100644 index 000000000..39cdf0c2d --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java @@ -0,0 +1,41 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Snapshot of database + */ +public class Snapshot extends RocksObject { + Snapshot(final long nativeHandle) { + super(nativeHandle); + + // The pointer to the snapshot is always released + // by the database instance. + disOwnNativeHandle(); + } + + /** + * Return the associated sequence number; + * + * @return the associated sequence number of + * this snapshot. + */ + public long getSequenceNumber() { + return getSequenceNumber(nativeHandle_); + } + + @Override + protected final void disposeInternal(final long handle) { + /** + * Nothing to release, we never own the pointer for a + * Snapshot. The pointer + * to the snapshot is released by the database + * instance. + */ + } + + private native long getSequenceNumber(long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java new file mode 100644 index 000000000..8805410aa --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileManager.java @@ -0,0 +1,251 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Map; + +/** + * SstFileManager is used to track SST files in the DB and control their + * deletion rate. + * + * All SstFileManager public functions are thread-safe. + * + * SstFileManager is not extensible. + */ +//@ThreadSafe +public final class SstFileManager extends RocksObject { + + public static final long RATE_BYTES_PER_SEC_DEFAULT = 0; + public static final boolean DELETE_EXISTING_TRASH_DEFAULT = true; + public static final double MAX_TRASH_DB_RATION_DEFAULT = 0.25; + public static final long BYTES_MAX_DELETE_CHUNK_DEFAULT = 64 * 1024 * 1024; + + /** + * Create a new SstFileManager that can be shared among multiple RocksDB + * instances to track SST file and control there deletion rate. + * + * @param env the environment. + * + * @throws RocksDBException thrown if error happens in underlying native library. 
+ */ + public SstFileManager(final Env env) throws RocksDBException { + this(env, null); + } + + /** + * Create a new SstFileManager that can be shared among multiple RocksDB + * instances to track SST file and control there deletion rate. + * + * @param env the environment. + * @param logger if not null, the logger will be used to log errors. + * + * @throws RocksDBException thrown if error happens in underlying native library. + */ + public SstFileManager(final Env env, /*@Nullable*/ final Logger logger) + throws RocksDBException { + this(env, logger, RATE_BYTES_PER_SEC_DEFAULT); + } + + /** + * Create a new SstFileManager that can be shared among multiple RocksDB + * instances to track SST file and control there deletion rate. + * + * @param env the environment. + * @param logger if not null, the logger will be used to log errors. + * + * == Deletion rate limiting specific arguments == + * @param rateBytesPerSec how many bytes should be deleted per second, If + * this value is set to 1024 (1 Kb / sec) and we deleted a file of size + * 4 Kb in 1 second, we will wait for another 3 seconds before we delete + * other files, Set to 0 to disable deletion rate limiting. + * + * @throws RocksDBException thrown if error happens in underlying native library. + */ + public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, + final long rateBytesPerSec) throws RocksDBException { + this(env, logger, rateBytesPerSec, MAX_TRASH_DB_RATION_DEFAULT); + } + + /** + * Create a new SstFileManager that can be shared among multiple RocksDB + * instances to track SST file and control there deletion rate. + * + * @param env the environment. + * @param logger if not null, the logger will be used to log errors. + * + * == Deletion rate limiting specific arguments == + * @param rateBytesPerSec how many bytes should be deleted per second, If + * this value is set to 1024 (1 Kb / sec) and we deleted a file of size + * 4 Kb in 1 second, we will wait for another 3 seconds before we delete + * other files, Set to 0 to disable deletion rate limiting. + * @param maxTrashDbRatio if the trash size constitutes for more than this + * fraction of the total DB size we will start deleting new files passed + * to DeleteScheduler immediately. + * + * @throws RocksDBException thrown if error happens in underlying native library. + */ + public SstFileManager(final Env env, /*@Nullable*/ final Logger logger, + final long rateBytesPerSec, final double maxTrashDbRatio) + throws RocksDBException { + this(env, logger, rateBytesPerSec, maxTrashDbRatio, + BYTES_MAX_DELETE_CHUNK_DEFAULT); + } + + /** + * Create a new SstFileManager that can be shared among multiple RocksDB + * instances to track SST file and control there deletion rate. + * + * @param env the environment. + * @param logger if not null, the logger will be used to log errors. + * + * == Deletion rate limiting specific arguments == + * @param rateBytesPerSec how many bytes should be deleted per second, If + * this value is set to 1024 (1 Kb / sec) and we deleted a file of size + * 4 Kb in 1 second, we will wait for another 3 seconds before we delete + * other files, Set to 0 to disable deletion rate limiting. + * @param maxTrashDbRatio if the trash size constitutes for more than this + * fraction of the total DB size we will start deleting new files passed + * to DeleteScheduler immediately. + * @param bytesMaxDeleteChunk if a single file is larger than delete chunk, + * ftruncate the file by this size each time, rather than dropping the whole + * file. 
0 means to always delete the whole file. + * + * @throws RocksDBException thrown if error happens in underlying native library. + */ + public SstFileManager(final Env env, /*@Nullable*/final Logger logger, + final long rateBytesPerSec, final double maxTrashDbRatio, + final long bytesMaxDeleteChunk) throws RocksDBException { + super(newSstFileManager(env.nativeHandle_, + logger != null ? logger.nativeHandle_ : 0, + rateBytesPerSec, maxTrashDbRatio, bytesMaxDeleteChunk)); + } + + + /** + * Update the maximum allowed space that should be used by RocksDB, if + * the total size of the SST files exceeds {@code maxAllowedSpace}, writes to + * RocksDB will fail. + * + * Setting {@code maxAllowedSpace} to 0 will disable this feature; + * maximum allowed space will be infinite (Default value). + * + * @param maxAllowedSpace the maximum allowed space that should be used by + * RocksDB. + */ + public void setMaxAllowedSpaceUsage(final long maxAllowedSpace) { + setMaxAllowedSpaceUsage(nativeHandle_, maxAllowedSpace); + } + + /** + * Set the amount of buffer room each compaction should be able to leave. + * In other words, at its maximum disk space consumption, the compaction + * should still leave {@code compactionBufferSize} available on the disk so + * that other background functions may continue, such as logging and flushing. + * + * @param compactionBufferSize the amount of buffer room each compaction + * should be able to leave. + */ + public void setCompactionBufferSize(final long compactionBufferSize) { + setCompactionBufferSize(nativeHandle_, compactionBufferSize); + } + + /** + * Determines if the total size of SST files exceeded the maximum allowed + * space usage. + * + * @return true when the maximum allows space usage has been exceeded. + */ + public boolean isMaxAllowedSpaceReached() { + return isMaxAllowedSpaceReached(nativeHandle_); + } + + /** + * Determines if the total size of SST files as well as estimated size + * of ongoing compactions exceeds the maximums allowed space usage. + * + * @return true when the total size of SST files as well as estimated size + * of ongoing compactions exceeds the maximums allowed space usage. + */ + public boolean isMaxAllowedSpaceReachedIncludingCompactions() { + return isMaxAllowedSpaceReachedIncludingCompactions(nativeHandle_); + } + + /** + * Get the total size of all tracked files. + * + * @return the total size of all tracked files. + */ + public long getTotalSize() { + return getTotalSize(nativeHandle_); + } + + /** + * Gets all tracked files and their corresponding sizes. + * + * @return a map containing all tracked files and there corresponding sizes. + */ + public Map getTrackedFiles() { + return getTrackedFiles(nativeHandle_); + } + + /** + * Gets the delete rate limit. + * + * @return the delete rate limit (in bytes per second). + */ + public long getDeleteRateBytesPerSecond() { + return getDeleteRateBytesPerSecond(nativeHandle_); + } + + /** + * Set the delete rate limit. + * + * Zero means disable delete rate limiting and delete files immediately. + * + * @param deleteRate the delete rate limit (in bytes per second). + */ + public void setDeleteRateBytesPerSecond(final long deleteRate) { + setDeleteRateBytesPerSecond(nativeHandle_, deleteRate); + } + + /** + * Get the trash/DB size ratio where new files will be deleted immediately. + * + * @return the trash/DB size ratio. 
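Tying the constructors together, a sketch of one rate-limited manager shared by two databases (4 MiB/s deletion cap; paths illustrative; {@code setSstFileManager} is assumed from the Options API elsewhere in this patch):

    import org.rocksdb.Env;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileManager;

    public class SstFileManagerSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (final SstFileManager sfm =
                 new SstFileManager(Env.getDefault(), null, 4L * 1024 * 1024);
             final Options options =
                 new Options().setCreateIfMissing(true).setSstFileManager(sfm);
             final RocksDB db1 = RocksDB.open(options, "/tmp/sfm-db1");
             final RocksDB db2 = RocksDB.open(options, "/tmp/sfm-db2")) {
          System.out.println("tracked bytes: " + sfm.getTotalSize());
        }
      }
    }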
+ */ + public double getMaxTrashDBRatio() { + return getMaxTrashDBRatio(nativeHandle_); + } + + /** + * Set the trash/DB size ratio where new files will be deleted immediately. + * + * @param ratio the trash/DB size ratio. + */ + public void setMaxTrashDBRatio(final double ratio) { + setMaxTrashDBRatio(nativeHandle_, ratio); + } + + private native static long newSstFileManager(final long handle, + final long logger_handle, final long rateBytesPerSec, + final double maxTrashDbRatio, final long bytesMaxDeleteChunk) + throws RocksDBException; + private native void setMaxAllowedSpaceUsage(final long handle, + final long maxAllowedSpace); + private native void setCompactionBufferSize(final long handle, + final long compactionBufferSize); + private native boolean isMaxAllowedSpaceReached(final long handle); + private native boolean isMaxAllowedSpaceReachedIncludingCompactions( + final long handle); + private native long getTotalSize(final long handle); + private native Map getTrackedFiles(final long handle); + private native long getDeleteRateBytesPerSecond(final long handle); + private native void setDeleteRateBytesPerSecond(final long handle, + final long deleteRate); + private native double getMaxTrashDBRatio(final long handle); + private native void setMaxTrashDBRatio(final long handle, final double ratio); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java new file mode 100644 index 000000000..a04d05cb5 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileMetaData.java @@ -0,0 +1,162 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The metadata that describes a SST file. 
+ */ +public class SstFileMetaData { + private final String fileName; + private final String path; + private final long size; + private final long smallestSeqno; + private final long largestSeqno; + private final byte[] smallestKey; + private final byte[] largestKey; + private final long numReadsSampled; + private final boolean beingCompacted; + private final long numEntries; + private final long numDeletions; + + /** + * Called from JNI C++ + * + * @param fileName the file name + * @param path the file path + * @param size the size of the file + * @param smallestSeqno the smallest sequence number + * @param largestSeqno the largest sequence number + * @param smallestKey the smallest key + * @param largestKey the largest key + * @param numReadsSampled the number of reads sampled + * @param beingCompacted true if the file is being compacted, false otherwise + * @param numEntries the number of entries + * @param numDeletions the number of deletions + */ + protected SstFileMetaData( + final String fileName, + final String path, + final long size, + final long smallestSeqno, + final long largestSeqno, + final byte[] smallestKey, + final byte[] largestKey, + final long numReadsSampled, + final boolean beingCompacted, + final long numEntries, + final long numDeletions) { + this.fileName = fileName; + this.path = path; + this.size = size; + this.smallestSeqno = smallestSeqno; + this.largestSeqno = largestSeqno; + this.smallestKey = smallestKey; + this.largestKey = largestKey; + this.numReadsSampled = numReadsSampled; + this.beingCompacted = beingCompacted; + this.numEntries = numEntries; + this.numDeletions = numDeletions; + } + + /** + * Get the name of the file. + * + * @return the name of the file. + */ + public String fileName() { + return fileName; + } + + /** + * Get the full path where the file locates. + * + * @return the full path + */ + public String path() { + return path; + } + + /** + * Get the file size in bytes. + * + * @return file size + */ + public long size() { + return size; + } + + /** + * Get the smallest sequence number in file. + * + * @return the smallest sequence number + */ + public long smallestSeqno() { + return smallestSeqno; + } + + /** + * Get the largest sequence number in file. + * + * @return the largest sequence number + */ + public long largestSeqno() { + return largestSeqno; + } + + /** + * Get the smallest user defined key in the file. + * + * @return the smallest user defined key + */ + public byte[] smallestKey() { + return smallestKey; + } + + /** + * Get the largest user defined key in the file. + * + * @return the largest user defined key + */ + public byte[] largestKey() { + return largestKey; + } + + /** + * Get the number of times the file has been read. + * + * @return the number of times the file has been read + */ + public long numReadsSampled() { + return numReadsSampled; + } + + /** + * Returns true if the file is currently being compacted. + * + * @return true if the file is currently being compacted, false otherwise. + */ + public boolean beingCompacted() { + return beingCompacted; + } + + /** + * Get the number of entries. + * + * @return the number of entries. + */ + public long numEntries() { + return numEntries; + } + + /** + * Get the number of deletions. + * + * @return the number of deletions. 
+ */ + public long numDeletions() { + return numDeletions; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java new file mode 100644 index 000000000..bb1e94ee0 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReader.java @@ -0,0 +1,82 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class SstFileReader extends RocksObject { + static { + RocksDB.loadLibrary(); + } + + public SstFileReader(final Options options) { + super(newSstFileReader(options.nativeHandle_)); + } + + /** + * Returns an iterator that will iterate on all keys in the default + * column family including both keys in the DB and uncommitted keys in this + * transaction. + * + * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read + * from the DB but will NOT change which keys are read from this transaction + * (the keys in this transaction do not yet belong to any snapshot and will be + * fetched regardless). + * + * Caller is responsible for deleting the returned Iterator. + * + * @param readOptions Read options. + * + * @return instance of iterator object. + */ + public SstFileReaderIterator newIterator(final ReadOptions readOptions) { + assert (isOwningHandle()); + long iter = newIterator(nativeHandle_, readOptions.nativeHandle_); + return new SstFileReaderIterator(this, iter); + } + + /** + * Prepare SstFileReader to read a file. + * + * @param filePath the location of file + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void open(final String filePath) throws RocksDBException { + open(nativeHandle_, filePath); + } + + /** + * Verify checksum + * + * @throws RocksDBException if the checksum is not valid + */ + public void verifyChecksum() throws RocksDBException { + verifyChecksum(nativeHandle_); + } + + /** + * Get the properties of the table. + * + * @return the properties + * + * @throws RocksDBException if an error occurs whilst getting the table + * properties + */ + public TableProperties getTableProperties() throws RocksDBException { + return getTableProperties(nativeHandle_); + } + + @Override protected final native void disposeInternal(final long handle); + private native long newIterator(final long handle, final long readOptionsHandle); + + private native void open(final long handle, final String filePath) + throws RocksDBException; + + private native static long newSstFileReader(final long optionsHandle); + private native void verifyChecksum(final long handle) throws RocksDBException; + private native TableProperties getTableProperties(final long handle) + throws RocksDBException; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java new file mode 100644 index 000000000..a4a08167b --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileReaderIterator.java @@ -0,0 +1,140 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
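A usage sketch for the SstFileReader above, paired with the iterator class that follows (path illustrative; the SST file must already exist):

    import org.rocksdb.Options;
    import org.rocksdb.ReadOptions;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileReader;
    import org.rocksdb.SstFileReaderIterator;

    public class SstReadSketch {
      public static void main(final String[] args) throws RocksDBException {
        try (final Options options = new Options();
             final SstFileReader reader = new SstFileReader(options)) {
          reader.open("/tmp/bulk.sst");
          reader.verifyChecksum();
          try (final ReadOptions readOptions = new ReadOptions();
               final SstFileReaderIterator it = reader.newIterator(readOptions)) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
              // consume it.key() / it.value()
            }
          }
        }
      }
    }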
+ +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + *

<p>An iterator that yields a sequence of key/value pairs from a source. + * Multiple implementations are provided by this library. + * In particular, iterators are provided + * to access the contents of a Table or a DB.</p> + * + * <p>Multiple threads can invoke const methods on a RocksIterator without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same RocksIterator must use + * external synchronization.</p>

+ * + * @see RocksObject + */ +public class SstFileReaderIterator extends AbstractRocksIterator { + protected SstFileReaderIterator(final SstFileReader reader, final long nativeHandle) { + super(reader, nativeHandle); + } + + /** + *

<p>Return the key for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>

+ * + * @return key for the current entry. + */ + public byte[] key() { + assert (isOwningHandle()); + return key0(nativeHandle_); + } + + /** + *

<p>Return the key for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>

+ * + * @param key the out-value to receive the retrieved key. + * It is using position and limit. Limit is set according to key size. + * Supports direct buffer only. + * @return The size of the actual key. If the return key is greater than the + * length of {@code key}, then it indicates that the size of the + * input buffer {@code key} is insufficient and partial result will + * be returned. + */ + public int key(final ByteBuffer key) { + assert (isOwningHandle()); + final int result; + if (key.isDirect()) { + result = keyDirect0(nativeHandle_, key, key.position(), key.remaining()); + } else { + result = keyByteArray0( + nativeHandle_, key.array(), key.arrayOffset() + key.position(), key.remaining()); + } + key.limit(Math.min(key.position() + result, key.limit())); + return result; + } + + /** + *

<p>Return the value for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: !AtEnd() &amp;&amp; !AtStart()</p>

+ * @return value for the current entry. + */ + public byte[] value() { + assert (isOwningHandle()); + return value0(nativeHandle_); + } + + /** + *

<p>Return the value for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>

+ * + * @param value the out-value to receive the retrieved value. + * It is using position and limit. Limit is set according to value size. + * Supports direct buffer only. + * @return The size of the actual value. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and partial result will + * be returned. + */ + public int value(final ByteBuffer value) { + assert (isOwningHandle()); + final int result; + if (value.isDirect()) { + result = valueDirect0(nativeHandle_, value, value.position(), value.remaining()); + } else { + result = valueByteArray0( + nativeHandle_, value.array(), value.arrayOffset() + value.position(), value.remaining()); + } + value.limit(Math.min(value.position() + result, value.limit())); + return result; + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native boolean isValid0(long handle); + @Override final native void seekToFirst0(long handle); + @Override final native void seekToLast0(long handle); + @Override final native void next0(long handle); + @Override final native void prev0(long handle); + @Override final native void refresh0(long handle) throws RocksDBException; + @Override final native void seek0(long handle, byte[] target, int targetLen); + @Override final native void seekForPrev0(long handle, byte[] target, int targetLen); + @Override final native void status0(long handle) throws RocksDBException; + @Override + final native void seekDirect0(long handle, ByteBuffer target, int targetOffset, int targetLen); + @Override + final native void seekForPrevDirect0( + long handle, ByteBuffer target, int targetOffset, int targetLen); + @Override + final native void seekByteArray0( + final long handle, final byte[] target, final int targetOffset, final int targetLen); + @Override + final native void seekForPrevByteArray0( + final long handle, final byte[] target, final int targetOffset, final int targetLen); + + private native byte[] key0(long handle); + private native byte[] value0(long handle); + + private native int keyDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); + private native int keyByteArray0(long handle, byte[] buffer, int bufferOffset, int bufferLen); + private native int valueDirect0(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen); + private native int valueByteArray0(long handle, byte[] buffer, int bufferOffset, int bufferLen); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java new file mode 100644 index 000000000..fe00c1a12 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstFileWriter.java @@ -0,0 +1,238 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + * SstFileWriter is used to create sst files that can be added to the + * database later. All keys in files generated by SstFileWriter will have + * sequence number = 0. + */ +public class SstFileWriter extends RocksObject { + static { + RocksDB.loadLibrary(); + } + + /** + * SstFileWriter Constructor. + * + * @param envOptions {@link org.rocksdb.EnvOptions} instance. + * @param options {@link org.rocksdb.Options} instance. 
+ */ + public SstFileWriter(final EnvOptions envOptions, final Options options) { + super(newSstFileWriter( + envOptions.nativeHandle_, options.nativeHandle_)); + } + + /** + * Prepare SstFileWriter to write to a file. + * + * @param filePath the location of file + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void open(final String filePath) throws RocksDBException { + open(nativeHandle_, filePath); + } + + /** + * Add a Put key with value to currently opened file. + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final Slice key, final Slice value) throws RocksDBException { + put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle()); + } + + /** + * Add a Put key with value to currently opened file. + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final DirectSlice key, final DirectSlice value) + throws RocksDBException { + put(nativeHandle_, key.getNativeHandle(), value.getNativeHandle()); + } + + /** + * Add a Put key with value to currently opened file. + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBException { + assert key.isDirect() && value.isDirect(); + putDirect(nativeHandle_, key, key.position(), key.remaining(), value, value.position(), + value.remaining()); + key.position(key.limit()); + value.position(value.limit()); + } + + /** + * Add a Put key with value to currently opened file. + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final byte[] key, final byte[] value) throws RocksDBException { + put(nativeHandle_, key, value); + } + + /** + * Add a Merge key with value to currently opened file. + * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final Slice key, final Slice value) + throws RocksDBException { + merge(nativeHandle_, key.getNativeHandle(), value.getNativeHandle()); + } + + /** + * Add a Merge key with value to currently opened file. + * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final byte[] key, final byte[] value) + throws RocksDBException { + merge(nativeHandle_, key, value); + } + + /** + * Add a Merge key with value to currently opened file. + * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
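Stepping back from the individual put/merge/delete operations, a sketch of the full writer lifecycle (keys must be added in comparator order; the sealed file can later be handed to RocksDB#ingestExternalFile):

    import static java.nio.charset.StandardCharsets.UTF_8;

    import org.rocksdb.EnvOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileWriter;

    public class SstWriteSketch {
      public static void main(final String[] args) throws RocksDBException {
        try (final EnvOptions envOptions = new EnvOptions();
             final Options options = new Options();
             final SstFileWriter writer = new SstFileWriter(envOptions, options)) {
          writer.open("/tmp/bulk.sst");
          writer.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8));
          writer.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8));
          writer.finish(); // seals the file
        }
      }
    }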
+ */ + public void merge(final DirectSlice key, final DirectSlice value) + throws RocksDBException { + merge(nativeHandle_, key.getNativeHandle(), value.getNativeHandle()); + } + + /** + * Add a deletion key to currently opened file. + * + * @param key the specified key to be deleted. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final Slice key) throws RocksDBException { + delete(nativeHandle_, key.getNativeHandle()); + } + + /** + * Add a deletion key to currently opened file. + * + * @param key the specified key to be deleted. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final DirectSlice key) throws RocksDBException { + delete(nativeHandle_, key.getNativeHandle()); + } + + /** + * Add a deletion key to currently opened file. + * + * @param key the specified key to be deleted. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void delete(final byte[] key) throws RocksDBException { + delete(nativeHandle_, key); + } + + /** + * Finish the process and close the sst file. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void finish() throws RocksDBException { + finish(nativeHandle_); + } + + /** + * Return the current file size. + * + * @return the current file size. + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public long fileSize() throws RocksDBException { + return fileSize(nativeHandle_); + } + + private native static long newSstFileWriter( + final long envOptionsHandle, final long optionsHandle, + final long userComparatorHandle, final byte comparatorType); + + private native static long newSstFileWriter(final long envOptionsHandle, + final long optionsHandle); + + private native void open(final long handle, final String filePath) + throws RocksDBException; + + private native void put(final long handle, final long keyHandle, + final long valueHandle) throws RocksDBException; + + private native void put(final long handle, final byte[] key, + final byte[] value) throws RocksDBException; + + private native void putDirect(long handle, ByteBuffer key, int keyOffset, int keyLength, + ByteBuffer value, int valueOffset, int valueLength) throws RocksDBException; + + private native long fileSize(long handle) throws RocksDBException; + + private native void merge(final long handle, final long keyHandle, + final long valueHandle) throws RocksDBException; + + private native void merge(final long handle, final byte[] key, + final byte[] value) throws RocksDBException; + + private native void delete(final long handle, final long keyHandle) + throws RocksDBException; + + private native void delete(final long handle, final byte[] key) + throws RocksDBException; + + private native void finish(final long handle) throws RocksDBException; + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFactory.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFactory.java new file mode 100644 index 000000000..ea6f13565 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFactory.java @@ -0,0 +1,15 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Handle to factory for SstPartitioner. It is used in {@link ColumnFamilyOptions} + */ +public abstract class SstPartitionerFactory extends RocksObject { + protected SstPartitionerFactory(final long nativeHandle) { + super(nativeHandle); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java b/src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java new file mode 100644 index 000000000..d513c5f15 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/SstPartitionerFixedPrefixFactory.java @@ -0,0 +1,19 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Fixed prefix factory. It partitions SST files using fixed prefix of the key. + */ +public class SstPartitionerFixedPrefixFactory extends SstPartitionerFactory { + public SstPartitionerFixedPrefixFactory(long prefixLength) { + super(newSstPartitionerFixedPrefixFactory0(prefixLength)); + } + + private native static long newSstPartitionerFixedPrefixFactory0(long prefixLength); + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StateType.java b/src/rocksdb/java/src/main/java/org/rocksdb/StateType.java new file mode 100644 index 000000000..803456bb2 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/StateType.java @@ -0,0 +1,53 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The type used to refer to a thread state. + * + * A state describes lower-level action of a thread + * such as reading / writing a file or waiting for a mutex. + */ +public enum StateType { + STATE_UNKNOWN((byte)0x0), + STATE_MUTEX_WAIT((byte)0x1); + + private final byte value; + + StateType(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value. + */ + byte getValue() { + return value; + } + + /** + * Get the State type from the internal representation value. + * + * @param value the internal representation value. + * + * @return the state type + * + * @throws IllegalArgumentException if the value does not match + * a StateType + */ + static StateType fromValue(final byte value) + throws IllegalArgumentException { + for (final StateType threadType : StateType.values()) { + if (threadType.value == value) { + return threadType; + } + } + throw new IllegalArgumentException( + "Unknown value for StateType: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java b/src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java new file mode 100644 index 000000000..0938a6d58 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Statistics.java @@ -0,0 +1,152 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.EnumSet;
+
+/**
+ * Statistics to analyze the performance of a db. Pointer for statistics object
+ * is managed by Options class.
+ */
+public class Statistics extends RocksObject {
+
+  public Statistics() {
+    super(newStatistics());
+  }
+
+  public Statistics(final Statistics otherStatistics) {
+    super(newStatistics(otherStatistics.nativeHandle_));
+  }
+
+  public Statistics(final EnumSet<HistogramType> ignoreHistograms) {
+    super(newStatistics(toArrayValues(ignoreHistograms)));
+  }
+
+  public Statistics(final EnumSet<HistogramType> ignoreHistograms, final Statistics otherStatistics) {
+    super(newStatistics(toArrayValues(ignoreHistograms), otherStatistics.nativeHandle_));
+  }
+
+  /**
+   * Intentionally package-private.
+   *
+   * Used from {@link DBOptions#statistics()}
+   *
+   * @param existingStatisticsHandle The C++ pointer to an existing statistics object
+   */
+  Statistics(final long existingStatisticsHandle) {
+    super(existingStatisticsHandle);
+  }
+
+  private static byte[] toArrayValues(final EnumSet<HistogramType> histogramTypes) {
+    final byte[] values = new byte[histogramTypes.size()];
+    int i = 0;
+    for(final HistogramType histogramType : histogramTypes) {
+      values[i++] = histogramType.getValue();
+    }
+    return values;
+  }
+
+  /**
+   * Gets the current stats level.
+   *
+   * @return The stats level.
+   */
+  public StatsLevel statsLevel() {
+    return StatsLevel.getStatsLevel(statsLevel(nativeHandle_));
+  }
+
+  /**
+   * Sets the stats level.
+   *
+   * @param statsLevel The stats level to set.
+   */
+  public void setStatsLevel(final StatsLevel statsLevel) {
+    setStatsLevel(nativeHandle_, statsLevel.getValue());
+  }
+
+  /**
+   * Get the count for a ticker.
+   *
+   * @param tickerType The ticker to get the count for
+   *
+   * @return The count for the ticker
+   */
+  public long getTickerCount(final TickerType tickerType) {
+    assert(isOwningHandle());
+    return getTickerCount(nativeHandle_, tickerType.getValue());
+  }
+
+  /**
+   * Get the count for a ticker and reset the tickers count.
+   *
+   * @param tickerType The ticker to get the count for
+   *
+   * @return The count for the ticker
+   */
+  public long getAndResetTickerCount(final TickerType tickerType) {
+    assert(isOwningHandle());
+    return getAndResetTickerCount(nativeHandle_, tickerType.getValue());
+  }
+
+  /**
+   * Gets the histogram data for a particular histogram.
+   *
+   * @param histogramType The histogram to retrieve the data for
+   *
+   * @return The histogram data
+   */
+  public HistogramData getHistogramData(final HistogramType histogramType) {
+    assert(isOwningHandle());
+    return getHistogramData(nativeHandle_, histogramType.getValue());
+  }
+
+  /**
+   * Gets a string representation of a particular histogram.
+   *
+   * @param histogramType The histogram to retrieve the data for
+   *
+   * @return A string representation of the histogram data
+   */
+  public String getHistogramString(final HistogramType histogramType) {
+    assert(isOwningHandle());
+    return getHistogramString(nativeHandle_, histogramType.getValue());
+  }
+
+  /**
+   * Resets all ticker and histogram stats.
+   *
+   * @throws RocksDBException if an error occurs when resetting the statistics.
+   */
+  public void reset() throws RocksDBException {
+    assert(isOwningHandle());
+    reset(nativeHandle_);
+  }
+
+  /**
+   * String representation of the statistic object.
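+   *
+   * <p>An illustrative sketch of typical usage (not part of the original file;
+   * assumes Options exposes a setStatistics(Statistics) setter, and the DB
+   * path is a placeholder):</p>
+   * <pre>
+   *   final Statistics statistics = new Statistics();
+   *   statistics.setStatsLevel(StatsLevel.EXCEPT_DETAILED_TIMERS);
+   *   try (final Options options = new Options()
+   *            .setCreateIfMissing(true)
+   *            .setStatistics(statistics);
+   *        final RocksDB db = RocksDB.open(options, "/tmp/stats-example")) {
+   *     db.put("k".getBytes(), "v".getBytes());
+   *     final long written = statistics.getTickerCount(TickerType.NUMBER_KEYS_WRITTEN);
+   *   }
+   * </pre>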
+ */ + @Override + public String toString() { + assert(isOwningHandle()); + return toString(nativeHandle_); + } + + private native static long newStatistics(); + private native static long newStatistics(final long otherStatisticsHandle); + private native static long newStatistics(final byte[] ignoreHistograms); + private native static long newStatistics(final byte[] ignoreHistograms, final long otherStatisticsHandle); + + @Override protected final native void disposeInternal(final long handle); + + private native byte statsLevel(final long handle); + private native void setStatsLevel(final long handle, final byte statsLevel); + private native long getTickerCount(final long handle, final byte tickerType); + private native long getAndResetTickerCount(final long handle, final byte tickerType); + private native HistogramData getHistogramData(final long handle, final byte histogramType); + private native String getHistogramString(final long handle, final byte histogramType); + private native void reset(final long nativeHandle) throws RocksDBException; + private native String toString(final long nativeHandle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java new file mode 100644 index 000000000..fb3f57150 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java @@ -0,0 +1,111 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +/** + *
<p>Helper class to collect DB statistics periodically at a period specified in
+ * constructor. Callback function (provided in constructor) is called with
+ * every statistics collection.</p>
+ *
+ * <p>Caller should call start() to start statistics collection. Shutdown() should
+ * be called to stop stats collection and should be called before statistics (
+ * provided in constructor) reference has been disposed.</p>
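+ *
+ * <p>An illustrative sketch (not part of the original file; the statistics
+ * instance is a placeholder obtained from an open database, and Collections
+ * is java.util.Collections):</p>
+ * <pre>
+ *   final StatisticsCollectorCallback callback = new StatisticsCollectorCallback() {
+ *     public void tickerCallback(TickerType tickerType, long tickerCount) {
+ *       // e.g. export the ticker value
+ *     }
+ *     public void histogramCallback(HistogramType histType, HistogramData histData) {
+ *       // e.g. export the histogram data
+ *     }
+ *   };
+ *   final StatisticsCollector collector = new StatisticsCollector(
+ *       Collections.singletonList(new StatsCollectorInput(statistics, callback)),
+ *       10000); // collect once every ten seconds
+ *   collector.start();
+ *   // ... later, before the statistics object is disposed ...
+ *   collector.shutDown(5000);
+ * </pre>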
+ */
+public class StatisticsCollector {
+  private final List<StatsCollectorInput> _statsCollectorInputList;
+  private final ExecutorService _executorService;
+  private final int _statsCollectionInterval;
+  private volatile boolean _isRunning = true;
+
+  /**
+   * Constructor for statistics collector.
+   *
+   * @param statsCollectorInputList List of statistics collector input.
+   * @param statsCollectionIntervalInMilliSeconds Statistics collection time
+   *        period (specified in milliseconds).
+   */
+  public StatisticsCollector(
+      final List<StatsCollectorInput> statsCollectorInputList,
+      final int statsCollectionIntervalInMilliSeconds) {
+    _statsCollectorInputList = statsCollectorInputList;
+    _statsCollectionInterval = statsCollectionIntervalInMilliSeconds;
+
+    _executorService = Executors.newSingleThreadExecutor();
+  }
+
+  public void start() {
+    _executorService.submit(collectStatistics());
+  }
+
+  /**
+   * Shuts down statistics collector.
+   *
+   * @param shutdownTimeout Time in milli-seconds to wait for shutdown before
+   *        killing the collection process.
+   * @throws java.lang.InterruptedException thrown if Threads are interrupted.
+   */
+  public void shutDown(final int shutdownTimeout) throws InterruptedException {
+    _isRunning = false;
+
+    _executorService.shutdownNow();
+    // Wait for collectStatistics runnable to finish so that disposal of
+    // statistics does not cause any exceptions to be thrown.
+    _executorService.awaitTermination(shutdownTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  private Runnable collectStatistics() {
+    return new Runnable() {
+
+      @Override
+      public void run() {
+        while (_isRunning) {
+          try {
+            if(Thread.currentThread().isInterrupted()) {
+              break;
+            }
+            for(final StatsCollectorInput statsCollectorInput :
+                _statsCollectorInputList) {
+              Statistics statistics = statsCollectorInput.getStatistics();
+              StatisticsCollectorCallback statsCallback =
+                  statsCollectorInput.getCallback();
+
+              // Collect ticker data
+              for(final TickerType ticker : TickerType.values()) {
+                if(ticker != TickerType.TICKER_ENUM_MAX) {
+                  final long tickerValue = statistics.getTickerCount(ticker);
+                  statsCallback.tickerCallback(ticker, tickerValue);
+                }
+              }
+
+              // Collect histogram data
+              for(final HistogramType histogramType : HistogramType.values()) {
+                if(histogramType != HistogramType.HISTOGRAM_ENUM_MAX) {
+                  final HistogramData histogramData =
+                      statistics.getHistogramData(histogramType);
+                  statsCallback.histogramCallback(histogramType, histogramData);
+                }
+              }
+            }
+
+            Thread.sleep(_statsCollectionInterval);
+          }
+          catch (final InterruptedException e) {
+            Thread.currentThread().interrupt();
+            break;
+          }
+          catch (final Exception e) {
+            throw new RuntimeException("Error while calculating statistics", e);
+          }
+        }
+      }
+    };
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
new file mode 100644
index 000000000..f3785b15f
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Callback interface provided to StatisticsCollector.
+ *
+ * Thread safety:
+ * StatisticsCollector doesn't make any guarantees about thread safety.
+ * If the same reference of StatisticsCollectorCallback is passed to multiple + * StatisticsCollector references, then its the responsibility of the + * user to make StatisticsCollectorCallback's implementation thread-safe. + * + */ +public interface StatisticsCollectorCallback { + /** + * Callback function to get ticker values. + * @param tickerType Ticker type. + * @param tickerCount Value of ticker type. + */ + void tickerCallback(TickerType tickerType, long tickerCount); + + /** + * Callback function to get histogram values. + * @param histType Histogram type. + * @param histData Histogram data. + */ + void histogramCallback(HistogramType histType, HistogramData histData); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java new file mode 100644 index 000000000..5bf43ade5 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java @@ -0,0 +1,35 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Contains all information necessary to collect statistics from one instance + * of DB statistics. + */ +public class StatsCollectorInput { + private final Statistics _statistics; + private final StatisticsCollectorCallback _statsCallback; + + /** + * Constructor for StatsCollectorInput. + * + * @param statistics Reference of DB statistics. + * @param statsCallback Reference of statistics callback interface. + */ + public StatsCollectorInput(final Statistics statistics, + final StatisticsCollectorCallback statsCallback) { + _statistics = statistics; + _statsCallback = statsCallback; + } + + public Statistics getStatistics() { + return _statistics; + } + + public StatisticsCollectorCallback getCallback() { + return _statsCallback; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java b/src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java new file mode 100644 index 000000000..58504b84a --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/StatsLevel.java @@ -0,0 +1,65 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The level of Statistics to report. + */ +public enum StatsLevel { + /** + * Collect all stats except time inside mutex lock AND time spent on + * compression. + */ + EXCEPT_DETAILED_TIMERS((byte) 0x0), + + /** + * Collect all stats except the counters requiring to get time inside the + * mutex lock. + */ + EXCEPT_TIME_FOR_MUTEX((byte) 0x1), + + /** + * Collect all stats, including measuring duration of mutex operations. + * + * If getting time is expensive on the platform to run, it can + * reduce scalability to more threads, especially for writes. + */ + ALL((byte) 0x2); + + private final byte value; + + StatsLevel(final byte value) { + this.value = value; + } + + /** + *
<p>Returns the byte value of the enumerations value.</p>
+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get StatsLevel by byte value. + * + * @param value byte representation of StatsLevel. + * + * @return {@link org.rocksdb.StatsLevel} instance. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static StatsLevel getStatsLevel(final byte value) { + for (final StatsLevel statsLevel : StatsLevel.values()) { + if (statsLevel.getValue() == value){ + return statsLevel; + } + } + throw new IllegalArgumentException( + "Illegal value provided for StatsLevel."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Status.java b/src/rocksdb/java/src/main/java/org/rocksdb/Status.java new file mode 100644 index 000000000..033ed3ea1 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Status.java @@ -0,0 +1,155 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Objects; + +/** + * Represents the status returned by a function call in RocksDB. + * + * Currently only used with {@link RocksDBException} when the + * status is not {@link Code#Ok} + */ +public class Status { + private final Code code; + /* @Nullable */ private final SubCode subCode; + /* @Nullable */ private final String state; + + public Status(final Code code, final SubCode subCode, final String state) { + this.code = code; + this.subCode = subCode; + this.state = state; + } + + /** + * Intentionally private as this will be called from JNI + */ + private Status(final byte code, final byte subCode, final String state) { + this.code = Code.getCode(code); + this.subCode = SubCode.getSubCode(subCode); + this.state = state; + } + + public Code getCode() { + return code; + } + + public SubCode getSubCode() { + return subCode; + } + + public String getState() { + return state; + } + + public String getCodeString() { + final StringBuilder builder = new StringBuilder() + .append(code.name()); + if(subCode != null && subCode != SubCode.None) { + builder.append("(") + .append(subCode.name()) + .append(")"); + } + return builder.toString(); + } + + // should stay in sync with /include/rocksdb/status.h:Code and /java/rocksjni/portal.h:toJavaStatusCode + public enum Code { + Ok( (byte)0x0), + NotFound( (byte)0x1), + Corruption( (byte)0x2), + NotSupported( (byte)0x3), + InvalidArgument( (byte)0x4), + IOError( (byte)0x5), + MergeInProgress( (byte)0x6), + Incomplete( (byte)0x7), + ShutdownInProgress( (byte)0x8), + TimedOut( (byte)0x9), + Aborted( (byte)0xA), + Busy( (byte)0xB), + Expired( (byte)0xC), + TryAgain( (byte)0xD), + Undefined( (byte)0x7F); + + private final byte value; + + Code(final byte value) { + this.value = value; + } + + public static Code getCode(final byte value) { + for (final Code code : Code.values()) { + if (code.value == value){ + return code; + } + } + throw new IllegalArgumentException( + "Illegal value provided for Code (" + value + ")."); + } + + /** + * Returns the byte value of the enumerations value. 
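+ *
+ * <p>An illustrative sketch of inspecting a Status when handling a failure
+ * (not part of the original file; assumes the RocksDBException thrown by the
+ * operation exposes getStatus(), and the db operation is a placeholder):</p>
+ * <pre>
+ *   try {
+ *     db.put("k".getBytes(), "v".getBytes());
+ *   } catch (final RocksDBException e) {
+ *     final Status status = e.getStatus();
+ *     if (status.getCode() == Status.Code.Busy) {
+ *       // back off and retry
+ *     }
+ *   }
+ * </pre>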
+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + } + + // should stay in sync with /include/rocksdb/status.h:SubCode and /java/rocksjni/portal.h:toJavaStatusSubCode + public enum SubCode { + None( (byte)0x0), + MutexTimeout( (byte)0x1), + LockTimeout( (byte)0x2), + LockLimit( (byte)0x3), + NoSpace( (byte)0x4), + Deadlock( (byte)0x5), + StaleFile( (byte)0x6), + MemoryLimit( (byte)0x7), + Undefined( (byte)0x7F); + + private final byte value; + + SubCode(final byte value) { + this.value = value; + } + + public static SubCode getSubCode(final byte value) { + for (final SubCode subCode : SubCode.values()) { + if (subCode.value == value){ + return subCode; + } + } + throw new IllegalArgumentException( + "Illegal value provided for SubCode (" + value + ")."); + } + + /** + * Returns the byte value of the enumerations value. + * + * @return byte representation + */ + public byte getValue() { + return value; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + Status status = (Status) o; + return code == status.code && subCode == status.subCode && Objects.equals(state, status.state); + } + + @Override + public int hashCode() { + return Objects.hash(code, subCode, state); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java new file mode 100644 index 000000000..ddbccff46 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java @@ -0,0 +1,29 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * StringAppendOperator is a merge operator that concatenates + * two strings. + */ +public class StringAppendOperator extends MergeOperator { + public StringAppendOperator() { + this(','); + } + + public StringAppendOperator(char delim) { + super(newSharedStringAppendOperator(delim)); + } + + public StringAppendOperator(String delim) { + super(newSharedStringAppendOperator(delim)); + } + + private native static long newSharedStringAppendOperator(final char delim); + private native static long newSharedStringAppendOperator(final String delim); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java new file mode 100644 index 000000000..5a383ade4 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationBriefInfo.java @@ -0,0 +1,107 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +import java.util.Objects; + +public class TableFileCreationBriefInfo { + private final String dbName; + private final String columnFamilyName; + private final String filePath; + private final int jobId; + private final TableFileCreationReason reason; + + /** + * Access is private as this will only be constructed from + * C++ via JNI, either directly of via + * {@link TableFileCreationInfo#TableFileCreationInfo(long, TableProperties, Status, String, + * String, String, int, byte)}. + * + * @param dbName the database name + * @param columnFamilyName the column family name + * @param filePath the path to the table file + * @param jobId the job identifier + * @param tableFileCreationReasonValue the reason for creation of the table file + */ + protected TableFileCreationBriefInfo(final String dbName, final String columnFamilyName, + final String filePath, final int jobId, final byte tableFileCreationReasonValue) { + this.dbName = dbName; + this.columnFamilyName = columnFamilyName; + this.filePath = filePath; + this.jobId = jobId; + this.reason = TableFileCreationReason.fromValue(tableFileCreationReasonValue); + } + + /** + * Get the name of the database where the file was created. + * + * @return the name of the database. + */ + public String getDbName() { + return dbName; + } + + /** + * Get the name of the column family where the file was created. + * + * @return the name of the column family. + */ + public String getColumnFamilyName() { + return columnFamilyName; + } + + /** + * Get the path to the created file. + * + * @return the path. + */ + public String getFilePath() { + return filePath; + } + + /** + * Get the id of the job (which could be flush or compaction) that + * created the file. + * + * @return the id of the job. + */ + public int getJobId() { + return jobId; + } + + /** + * Get the reason for creating the table. + * + * @return the reason for creating the table. + */ + public TableFileCreationReason getReason() { + return reason; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + TableFileCreationBriefInfo that = (TableFileCreationBriefInfo) o; + return jobId == that.jobId && Objects.equals(dbName, that.dbName) + && Objects.equals(columnFamilyName, that.columnFamilyName) + && Objects.equals(filePath, that.filePath) && reason == that.reason; + } + + @Override + public int hashCode() { + return Objects.hash(dbName, columnFamilyName, filePath, jobId, reason); + } + + @Override + public String toString() { + return "TableFileCreationBriefInfo{" + + "dbName='" + dbName + '\'' + ", columnFamilyName='" + columnFamilyName + '\'' + + ", filePath='" + filePath + '\'' + ", jobId=" + jobId + ", reason=" + reason + '}'; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationInfo.java new file mode 100644 index 000000000..7742f32f1 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationInfo.java @@ -0,0 +1,86 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +import java.util.Objects; + +public class TableFileCreationInfo extends TableFileCreationBriefInfo { + private final long fileSize; + private final TableProperties tableProperties; + private final Status status; + + /** + * Access is protected as this will only be constructed from + * C++ via JNI. + * + * @param fileSize the size of the table file + * @param tableProperties the properties of the table file + * @param status the status of the creation operation + * @param dbName the database name + * @param columnFamilyName the column family name + * @param filePath the path to the table file + * @param jobId the job identifier + * @param tableFileCreationReasonValue the reason for creation of the table file + */ + protected TableFileCreationInfo(final long fileSize, final TableProperties tableProperties, + final Status status, final String dbName, final String columnFamilyName, + final String filePath, final int jobId, final byte tableFileCreationReasonValue) { + super(dbName, columnFamilyName, filePath, jobId, tableFileCreationReasonValue); + this.fileSize = fileSize; + this.tableProperties = tableProperties; + this.status = status; + } + + /** + * Get the size of the file. + * + * @return the size. + */ + public long getFileSize() { + return fileSize; + } + + /** + * Get the detailed properties of the created file. + * + * @return the properties. + */ + public TableProperties getTableProperties() { + return tableProperties; + } + + /** + * Get the status indicating whether the creation was successful or not. + * + * @return the status. + */ + public Status getStatus() { + return status; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + TableFileCreationInfo that = (TableFileCreationInfo) o; + return fileSize == that.fileSize && Objects.equals(tableProperties, that.tableProperties) + && Objects.equals(status, that.status); + } + + @Override + public int hashCode() { + return Objects.hash(fileSize, tableProperties, status); + } + + @Override + public String toString() { + return "TableFileCreationInfo{" + + "fileSize=" + fileSize + ", tableProperties=" + tableProperties + ", status=" + status + + '}'; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationReason.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationReason.java new file mode 100644 index 000000000..d3984663d --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileCreationReason.java @@ -0,0 +1,46 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum TableFileCreationReason { + FLUSH((byte) 0x00), + COMPACTION((byte) 0x01), + RECOVERY((byte) 0x02), + MISC((byte) 0x03); + + private final byte value; + + TableFileCreationReason(final byte value) { + this.value = value; + } + + /** + * Get the internal representation. + * + * @return the internal representation + */ + byte getValue() { + return value; + } + + /** + * Get the TableFileCreationReason from the internal representation value. + * + * @return the table file creation reason. + * + * @throws IllegalArgumentException if the value is unknown. 
+ */ + static TableFileCreationReason fromValue(final byte value) { + for (final TableFileCreationReason tableFileCreationReason : TableFileCreationReason.values()) { + if (tableFileCreationReason.value == value) { + return tableFileCreationReason; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for TableFileCreationReason: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java new file mode 100644 index 000000000..8aad03ae8 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFileDeletionInfo.java @@ -0,0 +1,86 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Objects; + +public class TableFileDeletionInfo { + private final String dbName; + private final String filePath; + private final int jobId; + private final Status status; + + /** + * Access is package private as this will only be constructed from + * C++ via JNI and for testing. + */ + TableFileDeletionInfo( + final String dbName, final String filePath, final int jobId, final Status status) { + this.dbName = dbName; + this.filePath = filePath; + this.jobId = jobId; + this.status = status; + } + + /** + * Get the name of the database where the file was deleted. + * + * @return the name of the database. + */ + public String getDbName() { + return dbName; + } + + /** + * Get the path to the deleted file. + * + * @return the path. + */ + public String getFilePath() { + return filePath; + } + + /** + * Get the id of the job which deleted the file. + * + * @return the id of the job. + */ + public int getJobId() { + return jobId; + } + + /** + * Get the status indicating whether the deletion was successful or not. + * + * @return the status + */ + public Status getStatus() { + return status; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + TableFileDeletionInfo that = (TableFileDeletionInfo) o; + return jobId == that.jobId && Objects.equals(dbName, that.dbName) + && Objects.equals(filePath, that.filePath) && Objects.equals(status, that.status); + } + + @Override + public int hashCode() { + return Objects.hash(dbName, filePath, jobId, status); + } + + @Override + public String toString() { + return "TableFileDeletionInfo{" + + "dbName='" + dbName + '\'' + ", filePath='" + filePath + '\'' + ", jobId=" + jobId + + ", status=" + status + '}'; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java new file mode 100644 index 000000000..a39a329fb --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFilter.java @@ -0,0 +1,21 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +/** + * Filter for iterating a table. + */ +public interface TableFilter { + + /** + * A callback to determine whether relevant keys for this scan exist in a + * given table based on the table's properties. The callback is passed the + * properties of each table during iteration. If the callback returns false, + * the table will not be scanned. 
This option only affects Iterators and has + * no impact on point lookups. + * + * @param tableProperties the table properties. + * + * @return true if the table should be scanned, false otherwise. + */ + boolean filter(final TableProperties tableProperties); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java new file mode 100644 index 000000000..dbe524c42 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java @@ -0,0 +1,22 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * TableFormatConfig is used to config the internal Table format of a RocksDB. + * To make a RocksDB to use a specific Table format, its associated + * TableFormatConfig should be properly set and passed into Options via + * Options.setTableFormatConfig() and open the db using that Options. + */ +public abstract class TableFormatConfig { + /** + *
<p>This function should only be called by Options.setTableFormatConfig(),
+ * which will create a c++ shared-pointer to the c++ TableFactory
+ * that is associated with the Java TableFormatConfig.</p>
+ * + * @return native handle address to native table instance. + */ + abstract protected long newTableFactoryHandle(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java b/src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java new file mode 100644 index 000000000..096341a4c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TableProperties.java @@ -0,0 +1,426 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +/** + * TableProperties contains read-only properties of its associated + * table. + */ +public class TableProperties { + private final long dataSize; + private final long indexSize; + private final long indexPartitions; + private final long topLevelIndexSize; + private final long indexKeyIsUserKey; + private final long indexValueIsDeltaEncoded; + private final long filterSize; + private final long rawKeySize; + private final long rawValueSize; + private final long numDataBlocks; + private final long numEntries; + private final long numDeletions; + private final long numMergeOperands; + private final long numRangeDeletions; + private final long formatVersion; + private final long fixedKeyLen; + private final long columnFamilyId; + private final long creationTime; + private final long oldestKeyTime; + private final long slowCompressionEstimatedDataSize; + private final long fastCompressionEstimatedDataSize; + private final long externalSstFileGlobalSeqnoOffset; + private final byte[] columnFamilyName; + private final String filterPolicyName; + private final String comparatorName; + private final String mergeOperatorName; + private final String prefixExtractorName; + private final String propertyCollectorsNames; + private final String compressionName; + private final Map userCollectedProperties; + private final Map readableProperties; + + /** + * Access is package private as this will only be constructed from + * C++ via JNI and for testing. 
+ */ + TableProperties(final long dataSize, final long indexSize, final long indexPartitions, + final long topLevelIndexSize, final long indexKeyIsUserKey, + final long indexValueIsDeltaEncoded, final long filterSize, final long rawKeySize, + final long rawValueSize, final long numDataBlocks, final long numEntries, + final long numDeletions, final long numMergeOperands, final long numRangeDeletions, + final long formatVersion, final long fixedKeyLen, final long columnFamilyId, + final long creationTime, final long oldestKeyTime, + final long slowCompressionEstimatedDataSize, final long fastCompressionEstimatedDataSize, + final long externalSstFileGlobalSeqnoOffset, final byte[] columnFamilyName, + final String filterPolicyName, final String comparatorName, final String mergeOperatorName, + final String prefixExtractorName, final String propertyCollectorsNames, + final String compressionName, final Map userCollectedProperties, + final Map readableProperties) { + this.dataSize = dataSize; + this.indexSize = indexSize; + this.indexPartitions = indexPartitions; + this.topLevelIndexSize = topLevelIndexSize; + this.indexKeyIsUserKey = indexKeyIsUserKey; + this.indexValueIsDeltaEncoded = indexValueIsDeltaEncoded; + this.filterSize = filterSize; + this.rawKeySize = rawKeySize; + this.rawValueSize = rawValueSize; + this.numDataBlocks = numDataBlocks; + this.numEntries = numEntries; + this.numDeletions = numDeletions; + this.numMergeOperands = numMergeOperands; + this.numRangeDeletions = numRangeDeletions; + this.formatVersion = formatVersion; + this.fixedKeyLen = fixedKeyLen; + this.columnFamilyId = columnFamilyId; + this.creationTime = creationTime; + this.oldestKeyTime = oldestKeyTime; + this.slowCompressionEstimatedDataSize = slowCompressionEstimatedDataSize; + this.fastCompressionEstimatedDataSize = fastCompressionEstimatedDataSize; + this.externalSstFileGlobalSeqnoOffset = externalSstFileGlobalSeqnoOffset; + this.columnFamilyName = columnFamilyName; + this.filterPolicyName = filterPolicyName; + this.comparatorName = comparatorName; + this.mergeOperatorName = mergeOperatorName; + this.prefixExtractorName = prefixExtractorName; + this.propertyCollectorsNames = propertyCollectorsNames; + this.compressionName = compressionName; + this.userCollectedProperties = userCollectedProperties; + this.readableProperties = readableProperties; + } + + /** + * Get the total size of all data blocks. + * + * @return the total size of all data blocks. + */ + public long getDataSize() { + return dataSize; + } + + /** + * Get the size of index block. + * + * @return the size of index block. + */ + public long getIndexSize() { + return indexSize; + } + + /** + * Get the total number of index partitions + * if {@link IndexType#kTwoLevelIndexSearch} is used. + * + * @return the total number of index partitions. + */ + public long getIndexPartitions() { + return indexPartitions; + } + + /** + * Size of the top-level index + * if {@link IndexType#kTwoLevelIndexSearch} is used. + * + * @return the size of the top-level index. + */ + public long getTopLevelIndexSize() { + return topLevelIndexSize; + } + + /** + * Whether the index key is user key. + * Otherwise it includes 8 byte of sequence + * number added by internal key format. + * + * @return the index key + */ + public long getIndexKeyIsUserKey() { + return indexKeyIsUserKey; + } + + /** + * Whether delta encoding is used to encode the index values. + * + * @return whether delta encoding is used to encode the index values. 
+ */ + public long getIndexValueIsDeltaEncoded() { + return indexValueIsDeltaEncoded; + } + + /** + * Get the size of filter block. + * + * @return the size of filter block. + */ + public long getFilterSize() { + return filterSize; + } + + /** + * Get the total raw key size. + * + * @return the total raw key size. + */ + public long getRawKeySize() { + return rawKeySize; + } + + /** + * Get the total raw value size. + * + * @return the total raw value size. + */ + public long getRawValueSize() { + return rawValueSize; + } + + /** + * Get the number of blocks in this table. + * + * @return the number of blocks in this table. + */ + public long getNumDataBlocks() { + return numDataBlocks; + } + + /** + * Get the number of entries in this table. + * + * @return the number of entries in this table. + */ + public long getNumEntries() { + return numEntries; + } + + /** + * Get the number of deletions in the table. + * + * @return the number of deletions in the table. + */ + public long getNumDeletions() { + return numDeletions; + } + + /** + * Get the number of merge operands in the table. + * + * @return the number of merge operands in the table. + */ + public long getNumMergeOperands() { + return numMergeOperands; + } + + /** + * Get the number of range deletions in this table. + * + * @return the number of range deletions in this table. + */ + public long getNumRangeDeletions() { + return numRangeDeletions; + } + + /** + * Get the format version, reserved for backward compatibility. + * + * @return the format version. + */ + public long getFormatVersion() { + return formatVersion; + } + + /** + * Get the length of the keys. + * + * @return 0 when the key is variable length, otherwise number of + * bytes for each key. + */ + public long getFixedKeyLen() { + return fixedKeyLen; + } + + /** + * Get the ID of column family for this SST file, + * corresponding to the column family identified by + * {@link #getColumnFamilyName()}. + * + * @return the id of the column family. + */ + public long getColumnFamilyId() { + return columnFamilyId; + } + + /** + * The time when the SST file was created. + * Since SST files are immutable, this is equivalent + * to last modified time. + * + * @return the created time. + */ + public long getCreationTime() { + return creationTime; + } + + /** + * Get the timestamp of the earliest key. + * + * @return 0 means unknown, otherwise the timestamp. + */ + public long getOldestKeyTime() { + return oldestKeyTime; + } + + /** + * Get the estimated size of data blocks compressed with a relatively slower + * compression algorithm. + * + * @return 0 means unknown, otherwise the timestamp. + */ + public long getSlowCompressionEstimatedDataSize() { + return slowCompressionEstimatedDataSize; + } + + /** + * Get the estimated size of data blocks compressed with a relatively faster + * compression algorithm. + * + * @return 0 means unknown, otherwise the timestamp. + */ + public long getFastCompressionEstimatedDataSize() { + return fastCompressionEstimatedDataSize; + } + + /** + * Get the name of the column family with which this + * SST file is associated. + * + * @return the name of the column family, or null if the + * column family is unknown. + */ + /*@Nullable*/ public byte[] getColumnFamilyName() { + return columnFamilyName; + } + + /** + * Get the name of the filter policy used in this table. + * + * @return the name of the filter policy, or null if + * no filter policy is used. 
+ */ + /*@Nullable*/ public String getFilterPolicyName() { + return filterPolicyName; + } + + /** + * Get the name of the comparator used in this table. + * + * @return the name of the comparator. + */ + public String getComparatorName() { + return comparatorName; + } + + /** + * Get the name of the merge operator used in this table. + * + * @return the name of the merge operator, or null if no merge operator + * is used. + */ + /*@Nullable*/ public String getMergeOperatorName() { + return mergeOperatorName; + } + + /** + * Get the name of the prefix extractor used in this table. + * + * @return the name of the prefix extractor, or null if no prefix + * extractor is used. + */ + /*@Nullable*/ public String getPrefixExtractorName() { + return prefixExtractorName; + } + + /** + * Get the names of the property collectors factories used in this table. + * + * @return the names of the property collector factories separated + * by commas, e.g. {collector_name[1]},{collector_name[2]},... + */ + public String getPropertyCollectorsNames() { + return propertyCollectorsNames; + } + + /** + * Get the name of the compression algorithm used to compress the SST files. + * + * @return the name of the compression algorithm. + */ + public String getCompressionName() { + return compressionName; + } + + /** + * Get the user collected properties. + * + * @return the user collected properties. + */ + public Map getUserCollectedProperties() { + return userCollectedProperties; + } + + /** + * Get the readable properties. + * + * @return the readable properties. + */ + public Map getReadableProperties() { + return readableProperties; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + TableProperties that = (TableProperties) o; + return dataSize == that.dataSize && indexSize == that.indexSize + && indexPartitions == that.indexPartitions && topLevelIndexSize == that.topLevelIndexSize + && indexKeyIsUserKey == that.indexKeyIsUserKey + && indexValueIsDeltaEncoded == that.indexValueIsDeltaEncoded + && filterSize == that.filterSize && rawKeySize == that.rawKeySize + && rawValueSize == that.rawValueSize && numDataBlocks == that.numDataBlocks + && numEntries == that.numEntries && numDeletions == that.numDeletions + && numMergeOperands == that.numMergeOperands && numRangeDeletions == that.numRangeDeletions + && formatVersion == that.formatVersion && fixedKeyLen == that.fixedKeyLen + && columnFamilyId == that.columnFamilyId && creationTime == that.creationTime + && oldestKeyTime == that.oldestKeyTime + && slowCompressionEstimatedDataSize == that.slowCompressionEstimatedDataSize + && fastCompressionEstimatedDataSize == that.fastCompressionEstimatedDataSize + && externalSstFileGlobalSeqnoOffset == that.externalSstFileGlobalSeqnoOffset + && Arrays.equals(columnFamilyName, that.columnFamilyName) + && Objects.equals(filterPolicyName, that.filterPolicyName) + && Objects.equals(comparatorName, that.comparatorName) + && Objects.equals(mergeOperatorName, that.mergeOperatorName) + && Objects.equals(prefixExtractorName, that.prefixExtractorName) + && Objects.equals(propertyCollectorsNames, that.propertyCollectorsNames) + && Objects.equals(compressionName, that.compressionName) + && Objects.equals(userCollectedProperties, that.userCollectedProperties) + && Objects.equals(readableProperties, that.readableProperties); + } + + @Override + public int hashCode() { + int result = Objects.hash(dataSize, indexSize, indexPartitions, 
topLevelIndexSize, + indexKeyIsUserKey, indexValueIsDeltaEncoded, filterSize, rawKeySize, rawValueSize, + numDataBlocks, numEntries, numDeletions, numMergeOperands, numRangeDeletions, formatVersion, + fixedKeyLen, columnFamilyId, creationTime, oldestKeyTime, slowCompressionEstimatedDataSize, + fastCompressionEstimatedDataSize, externalSstFileGlobalSeqnoOffset, filterPolicyName, + comparatorName, mergeOperatorName, prefixExtractorName, propertyCollectorsNames, + compressionName, userCollectedProperties, readableProperties); + result = 31 * result + Arrays.hashCode(columnFamilyName); + return result; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java new file mode 100644 index 000000000..062df5889 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadStatus.java @@ -0,0 +1,224 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Map; + +public class ThreadStatus { + private final long threadId; + private final ThreadType threadType; + private final String dbName; + private final String cfName; + private final OperationType operationType; + private final long operationElapsedTime; // microseconds + private final OperationStage operationStage; + private final long operationProperties[]; + private final StateType stateType; + + /** + * Invoked from C++ via JNI + */ + private ThreadStatus(final long threadId, + final byte threadTypeValue, + final String dbName, + final String cfName, + final byte operationTypeValue, + final long operationElapsedTime, + final byte operationStageValue, + final long[] operationProperties, + final byte stateTypeValue) { + this.threadId = threadId; + this.threadType = ThreadType.fromValue(threadTypeValue); + this.dbName = dbName; + this.cfName = cfName; + this.operationType = OperationType.fromValue(operationTypeValue); + this.operationElapsedTime = operationElapsedTime; + this.operationStage = OperationStage.fromValue(operationStageValue); + this.operationProperties = operationProperties; + this.stateType = StateType.fromValue(stateTypeValue); + } + + /** + * Get the unique ID of the thread. + * + * @return the thread id + */ + public long getThreadId() { + return threadId; + } + + /** + * Get the type of the thread. + * + * @return the type of the thread. + */ + public ThreadType getThreadType() { + return threadType; + } + + /** + * The name of the DB instance that the thread is currently + * involved with. + * + * @return the name of the db, or null if the thread is not involved + * in any DB operation. + */ + /* @Nullable */ public String getDbName() { + return dbName; + } + + /** + * The name of the Column Family that the thread is currently + * involved with. + * + * @return the name of the db, or null if the thread is not involved + * in any column Family operation. + */ + /* @Nullable */ public String getCfName() { + return cfName; + } + + /** + * Get the operation (high-level action) that the current thread is involved + * with. + * + * @return the operation + */ + public OperationType getOperationType() { + return operationType; + } + + /** + * Get the elapsed time of the current thread operation in microseconds. 
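+ *
+ * <p>For display the value can be formatted with the helper below, e.g.
+ * (illustrative): {@code ThreadStatus.microsToString(threadStatus.getOperationElapsedTime())}.</p>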
+ * + * @return the elapsed time + */ + public long getOperationElapsedTime() { + return operationElapsedTime; + } + + /** + * Get the current stage where the thread is involved in the current + * operation. + * + * @return the current stage of the current operation + */ + public OperationStage getOperationStage() { + return operationStage; + } + + /** + * Get the list of properties that describe some details about the current + * operation. + * + * Each field in might have different meanings for different operations. + * + * @return the properties + */ + public long[] getOperationProperties() { + return operationProperties; + } + + /** + * Get the state (lower-level action) that the current thread is involved + * with. + * + * @return the state + */ + public StateType getStateType() { + return stateType; + } + + /** + * Get the name of the thread type. + * + * @param threadType the thread type + * + * @return the name of the thread type. + */ + public static String getThreadTypeName(final ThreadType threadType) { + return getThreadTypeName(threadType.getValue()); + } + + /** + * Get the name of an operation given its type. + * + * @param operationType the type of operation. + * + * @return the name of the operation. + */ + public static String getOperationName(final OperationType operationType) { + return getOperationName(operationType.getValue()); + } + + public static String microsToString(final long operationElapsedTime) { + return microsToStringNative(operationElapsedTime); + } + + /** + * Obtain a human-readable string describing the specified operation stage. + * + * @param operationStage the stage of the operation. + * + * @return the description of the operation stage. + */ + public static String getOperationStageName( + final OperationStage operationStage) { + return getOperationStageName(operationStage.getValue()); + } + + /** + * Obtain the name of the "i"th operation property of the + * specified operation. + * + * @param operationType the operation type. + * @param i the index of the operation property. + * + * @return the name of the operation property + */ + public static String getOperationPropertyName( + final OperationType operationType, final int i) { + return getOperationPropertyName(operationType.getValue(), i); + } + + /** + * Translate the "i"th property of the specified operation given + * a property value. + * + * @param operationType the operation type. + * @param operationProperties the operation properties. + * + * @return the property values. + */ + public static Map interpretOperationProperties( + final OperationType operationType, final long[] operationProperties) { + return interpretOperationProperties(operationType.getValue(), + operationProperties); + } + + /** + * Obtain the name of a state given its type. + * + * @param stateType the state type. + * + * @return the name of the state. 
+ */
+  public static String getStateName(final StateType stateType) {
+    return getStateName(stateType.getValue());
+  }
+
+  private static native String getThreadTypeName(final byte threadTypeValue);
+  private static native String getOperationName(final byte operationTypeValue);
+  private static native String microsToStringNative(
+      final long operationElapsedTime);
+  private static native String getOperationStageName(
+      final byte operationStageTypeValue);
+  private static native String getOperationPropertyName(
+      final byte operationTypeValue, final int i);
+  private static native Map<Long, String> interpretOperationProperties(
+      final byte operationTypeValue, final long[] operationProperties);
+  private static native String getStateName(final byte stateTypeValue);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java
new file mode 100644
index 000000000..cc329f442
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/ThreadType.java
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The type of a thread.
+ */
+public enum ThreadType {
+  /**
+   * RocksDB BG thread in high-pri thread pool.
+   */
+  HIGH_PRIORITY((byte)0x0),
+
+  /**
+   * RocksDB BG thread in low-pri thread pool.
+   */
+  LOW_PRIORITY((byte)0x1),
+
+  /**
+   * User thread (Non-RocksDB BG thread).
+   */
+  USER((byte)0x2),
+
+  /**
+   * RocksDB BG thread in bottom-pri thread pool
+   */
+  BOTTOM_PRIORITY((byte)0x3);
+
+  private final byte value;
+
+  ThreadType(final byte value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the internal representation value.
+   *
+   * @return the internal representation value.
+   */
+  byte getValue() {
+    return value;
+  }
+
+  /**
+   * Get the Thread type from the internal representation value.
+   *
+   * @param value the internal representation value.
+   *
+   * @return the thread type
+   *
+   * @throws IllegalArgumentException if the value does not match a ThreadType
+   */
+  static ThreadType fromValue(final byte value)
+      throws IllegalArgumentException {
+    for (final ThreadType threadType : ThreadType.values()) {
+      if (threadType.value == value) {
+        return threadType;
+      }
+    }
+    throw new IllegalArgumentException("Unknown value for ThreadType: " + value);
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java b/src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java
new file mode 100644
index 000000000..0d00add5b
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TickerType.java
@@ -0,0 +1,874 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * The logical mapping of tickers defined in rocksdb::Tickers.
+ *
+ * Java byte value mappings don't align 1:1 to the c++ values. The c++ rocksdb::Tickers enumeration type
+ * is uint32_t and java org.rocksdb.TickerType is byte, which causes mapping issues when a
+ * rocksdb::Tickers value is greater than 127 (0x7F), as the jbyte jni interface offers no greater range; such values are not available.
Without breaking interface in minor versions, value mappings for + * org.rocksdb.TickerType leverage full byte range [-128 (-0x80), (0x7F)]. Newer tickers added + * should descend into negative values until TICKER_ENUM_MAX reaches -128 (-0x80). + */ +public enum TickerType { + + /** + * total block cache misses + * + * REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + + * BLOCK_CACHE_FILTER_MISS + + * BLOCK_CACHE_DATA_MISS; + */ + BLOCK_CACHE_MISS((byte) 0x0), + + /** + * total block cache hit + * + * REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + + * BLOCK_CACHE_FILTER_HIT + + * BLOCK_CACHE_DATA_HIT; + */ + BLOCK_CACHE_HIT((byte) 0x1), + + BLOCK_CACHE_ADD((byte) 0x2), + + /** + * # of failures when adding blocks to block cache. + */ + BLOCK_CACHE_ADD_FAILURES((byte) 0x3), + + /** + * # of times cache miss when accessing index block from block cache. + */ + BLOCK_CACHE_INDEX_MISS((byte) 0x4), + + /** + * # of times cache hit when accessing index block from block cache. + */ + BLOCK_CACHE_INDEX_HIT((byte) 0x5), + + /** + * # of index blocks added to block cache. + */ + BLOCK_CACHE_INDEX_ADD((byte) 0x6), + + /** + * # of bytes of index blocks inserted into cache + */ + BLOCK_CACHE_INDEX_BYTES_INSERT((byte) 0x7), + + /** + * # of bytes of index block erased from cache + */ + BLOCK_CACHE_INDEX_BYTES_EVICT((byte) 0x8), + + /** + * # of times cache miss when accessing filter block from block cache. + */ + BLOCK_CACHE_FILTER_MISS((byte) 0x9), + + /** + * # of times cache hit when accessing filter block from block cache. + */ + BLOCK_CACHE_FILTER_HIT((byte) 0xA), + + /** + * # of filter blocks added to block cache. + */ + BLOCK_CACHE_FILTER_ADD((byte) 0xB), + + /** + * # of bytes of bloom filter blocks inserted into cache + */ + BLOCK_CACHE_FILTER_BYTES_INSERT((byte) 0xC), + + /** + * # of bytes of bloom filter block erased from cache + */ + BLOCK_CACHE_FILTER_BYTES_EVICT((byte) 0xD), + + /** + * # of times cache miss when accessing data block from block cache. + */ + BLOCK_CACHE_DATA_MISS((byte) 0xE), + + /** + * # of times cache hit when accessing data block from block cache. + */ + BLOCK_CACHE_DATA_HIT((byte) 0xF), + + /** + * # of data blocks added to block cache. + */ + BLOCK_CACHE_DATA_ADD((byte) 0x10), + + /** + * # of bytes of data blocks inserted into cache + */ + BLOCK_CACHE_DATA_BYTES_INSERT((byte) 0x11), + + /** + * # of bytes read from cache. + */ + BLOCK_CACHE_BYTES_READ((byte) 0x12), + + /** + * # of bytes written into cache. + */ + BLOCK_CACHE_BYTES_WRITE((byte) 0x13), + + /** + * # of times bloom filter has avoided file reads. + */ + BLOOM_FILTER_USEFUL((byte) 0x14), + + /** + * # persistent cache hit + */ + PERSISTENT_CACHE_HIT((byte) 0x15), + + /** + * # persistent cache miss + */ + PERSISTENT_CACHE_MISS((byte) 0x16), + + /** + * # total simulation block cache hits + */ + SIM_BLOCK_CACHE_HIT((byte) 0x17), + + /** + * # total simulation block cache misses + */ + SIM_BLOCK_CACHE_MISS((byte) 0x18), + + /** + * # of memtable hits. + */ + MEMTABLE_HIT((byte) 0x19), + + /** + * # of memtable misses. + */ + MEMTABLE_MISS((byte) 0x1A), + + /** + * # of Get() queries served by L0 + */ + GET_HIT_L0((byte) 0x1B), + + /** + * # of Get() queries served by L1 + */ + GET_HIT_L1((byte) 0x1C), + + /** + * # of Get() queries served by L2 and up + */ + GET_HIT_L2_AND_UP((byte) 0x1D), + + /** + * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction + * There are 4 reasons currently. + */ + + /** + * key was written with a newer value. 
+ */ + COMPACTION_KEY_DROP_NEWER_ENTRY((byte) 0x1E), + + /** + * Also includes keys dropped for range del. + * The key is obsolete. + */ + COMPACTION_KEY_DROP_OBSOLETE((byte) 0x1F), + + /** + * The key was covered by a range tombstone. + */ + COMPACTION_KEY_DROP_RANGE_DEL((byte) 0x20), + + /** + * User compaction function has dropped the key. + */ + COMPACTION_KEY_DROP_USER((byte) 0x21), + + /** + * All keys in range were deleted. + */ + COMPACTION_RANGE_DEL_DROP_OBSOLETE((byte) 0x22), + + /** + * Number of keys written to the database via the Put and Write calls. + */ + NUMBER_KEYS_WRITTEN((byte) 0x23), + + /** + * Number of keys read. + */ + NUMBER_KEYS_READ((byte) 0x24), + + /** + * Number of keys updated, if inplace update is enabled + */ + NUMBER_KEYS_UPDATED((byte) 0x25), + + /** + * The number of uncompressed bytes issued by DB::Put(), DB::Delete(), + * DB::Merge(), and DB::Write(). + */ + BYTES_WRITTEN((byte) 0x26), + + /** + * The number of uncompressed bytes read from DB::Get(). It could be + * either from memtables, cache, or table files. + * + * For the number of logical bytes read from DB::MultiGet(), + * please use {@link #NUMBER_MULTIGET_BYTES_READ}. + */ + BYTES_READ((byte) 0x27), + + /** + * The number of calls to seek. + */ + NUMBER_DB_SEEK((byte) 0x28), + + /** + * The number of calls to next. + */ + NUMBER_DB_NEXT((byte) 0x29), + + /** + * The number of calls to prev. + */ + NUMBER_DB_PREV((byte) 0x2A), + + /** + * The number of calls to seek that returned data. + */ + NUMBER_DB_SEEK_FOUND((byte) 0x2B), + + /** + * The number of calls to next that returned data. + */ + NUMBER_DB_NEXT_FOUND((byte) 0x2C), + + /** + * The number of calls to prev that returned data. + */ + NUMBER_DB_PREV_FOUND((byte) 0x2D), + + /** + * The number of uncompressed bytes read from an iterator. + * Includes size of key and value. + */ + ITER_BYTES_READ((byte) 0x2E), + + NO_FILE_CLOSES((byte) 0x2F), + + NO_FILE_OPENS((byte) 0x30), + + NO_FILE_ERRORS((byte) 0x31), + + /** + * Time system had to wait to do L0-L1 compactions. + * + * @deprecated + */ + @Deprecated + STALL_L0_SLOWDOWN_MICROS((byte) 0x32), + + /** + * Time system had to wait to move memtable to L1. + * + * @deprecated + */ + @Deprecated + STALL_MEMTABLE_COMPACTION_MICROS((byte) 0x33), + + /** + * Write throttle because of too many files in L0. + * + * @deprecated + */ + @Deprecated + STALL_L0_NUM_FILES_MICROS((byte) 0x34), + + /** + * Writer has to wait for compaction or flush to finish. + */ + STALL_MICROS((byte) 0x35), + + /** + * The wait time for db mutex. + * + * Disabled by default. To enable it set stats level to {@link StatsLevel#ALL} + */ + DB_MUTEX_WAIT_MICROS((byte) 0x36), + + RATE_LIMIT_DELAY_MILLIS((byte) 0x37), + + /** + * Number of iterators created. + */ + NO_ITERATORS((byte) 0x38), + + /** + * Number of MultiGet calls. + */ + NUMBER_MULTIGET_CALLS((byte) 0x39), + + /** + * Number of MultiGet keys read. + */ + NUMBER_MULTIGET_KEYS_READ((byte) 0x3A), + + /** + * Number of MultiGet bytes read. + */ + NUMBER_MULTIGET_BYTES_READ((byte) 0x3B), + + /** + * Number of delete records that were not required to be + * written to storage because the key does not exist. + */ + NUMBER_FILTERED_DELETES((byte) 0x3C), + NUMBER_MERGE_FAILURES((byte) 0x3D), + + /** + * Number of times bloom was checked before creating iterator on a + * file, and the number of times the check was useful in avoiding + * iterator creation (and thus likely IOPs).
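+ * + * <p>A minimal sketch of reading this pair of tickers, assuming statistics were enabled via {@link Options#setStatistics(Statistics)} before the database was opened (variable names are illustrative): + * <pre>{@code + * final Statistics stats = new Statistics(); + * options.setStatistics(stats); + * // ... open and use the database ... + * final long checked = stats.getTickerCount(TickerType.BLOOM_FILTER_PREFIX_CHECKED); + * final long useful = stats.getTickerCount(TickerType.BLOOM_FILTER_PREFIX_USEFUL); + * }</pre>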
+ */ + BLOOM_FILTER_PREFIX_CHECKED((byte) 0x3E), + BLOOM_FILTER_PREFIX_USEFUL((byte) 0x3F), + + /** + * Number of times we had to reseek inside an iteration to skip + * over a large number of keys with the same userkey. + */ + NUMBER_OF_RESEEKS_IN_ITERATION((byte) 0x40), + + /** + * Record the number of calls to {@link RocksDB#getUpdatesSince(long)}. Useful to keep track of + * transaction log iterator refreshes. + */ + GET_UPDATES_SINCE_CALLS((byte) 0x41), + + /** + * Miss in the compressed block cache. + */ + BLOCK_CACHE_COMPRESSED_MISS((byte) 0x42), + + /** + * Hit in the compressed block cache. + */ + BLOCK_CACHE_COMPRESSED_HIT((byte) 0x43), + + /** + * Number of blocks added to compressed block cache. + */ + BLOCK_CACHE_COMPRESSED_ADD((byte) 0x44), + + /** + * Number of failures when adding blocks to compressed block cache. + */ + BLOCK_CACHE_COMPRESSED_ADD_FAILURES((byte) 0x45), + + /** + * Number of times WAL sync is done. + */ + WAL_FILE_SYNCED((byte) 0x46), + + /** + * Number of bytes written to WAL. + */ + WAL_FILE_BYTES((byte) 0x47), + + /** + * Writes can be processed by the requesting thread or by the thread at the + * head of the writers queue. + */ + WRITE_DONE_BY_SELF((byte) 0x48), + + /** + * Equivalent to writes done for others. + */ + WRITE_DONE_BY_OTHER((byte) 0x49), + + /** + * Number of writes that ended up timing out. + */ + WRITE_TIMEDOUT((byte) 0x4A), + + /** + * Number of Write calls that request WAL. + */ + WRITE_WITH_WAL((byte) 0x4B), + + /** + * Bytes read during compaction. + */ + COMPACT_READ_BYTES((byte) 0x4C), + + /** + * Bytes written during compaction. + */ + COMPACT_WRITE_BYTES((byte) 0x4D), + + /** + * Bytes written during flush. + */ + FLUSH_WRITE_BYTES((byte) 0x4E), + + /** + * Number of tables' properties loaded directly from file, without creating + * a table reader object. + */ + NUMBER_DIRECT_LOAD_TABLE_PROPERTIES((byte) 0x4F), + NUMBER_SUPERVERSION_ACQUIRES((byte) 0x50), + NUMBER_SUPERVERSION_RELEASES((byte) 0x51), + NUMBER_SUPERVERSION_CLEANUPS((byte) 0x52), + + /** + * # of compressions/decompressions executed + */ + NUMBER_BLOCK_COMPRESSED((byte) 0x53), + NUMBER_BLOCK_DECOMPRESSED((byte) 0x54), + + NUMBER_BLOCK_NOT_COMPRESSED((byte) 0x55), + MERGE_OPERATION_TOTAL_TIME((byte) 0x56), + FILTER_OPERATION_TOTAL_TIME((byte) 0x57), + + /** + * Row cache. + */ + ROW_CACHE_HIT((byte) 0x58), + ROW_CACHE_MISS((byte) 0x59), + + /** + * Read amplification statistics. + * + * Read amplification can be calculated using this formula + * (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES) + * + * REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled + */ + + /** + * Estimate of total bytes actually used. + */ + READ_AMP_ESTIMATE_USEFUL_BYTES((byte) 0x5A), + + /** + * Total size of loaded data blocks. + */ + READ_AMP_TOTAL_READ_BYTES((byte) 0x5B), + + /** + * Number of refill intervals where rate limiter's bytes are fully consumed. + */ + NUMBER_RATE_LIMITER_DRAINS((byte) 0x5C), + + /** + * Number of internal keys skipped during iteration + */ + NUMBER_ITER_SKIP((byte) 0x5D), + + /** + * Number of MultiGet keys found (vs number requested) + */ + NUMBER_MULTIGET_KEYS_FOUND((byte) 0x5E), + + // -0x01 to fixate the new value that incorrectly changed TICKER_ENUM_MAX + /** + * Number of iterators created. + */ + NO_ITERATOR_CREATED((byte) -0x01), + + /** + * Number of iterators deleted. + */ + NO_ITERATOR_DELETED((byte) 0x60), + + /** + * Deletions obsoleted before bottom level due to file gap optimization.
+ */ + COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE((byte) 0x61), + + /** + * If a compaction was cancelled by the SstFileManager to prevent ENOSPC. + */ + COMPACTION_CANCELLED((byte) 0x62), + + /** + * # of times bloom FullFilter has not avoided the reads. + */ + BLOOM_FILTER_FULL_POSITIVE((byte) 0x63), + + /** + * # of times bloom FullFilter has not avoided the reads and data actually + * exist. + */ + BLOOM_FILTER_FULL_TRUE_POSITIVE((byte) 0x64), + + /** + * BlobDB specific stats + * # of Put/PutTTL/PutUntil to BlobDB. + */ + BLOB_DB_NUM_PUT((byte) 0x65), + + /** + * # of Write to BlobDB. + */ + BLOB_DB_NUM_WRITE((byte) 0x66), + + /** + * # of Get to BlobDB. + */ + BLOB_DB_NUM_GET((byte) 0x67), + + /** + * # of MultiGet to BlobDB. + */ + BLOB_DB_NUM_MULTIGET((byte) 0x68), + + /** + * # of Seek/SeekToFirst/SeekToLast/SeekForPrev to BlobDB iterator. + */ + BLOB_DB_NUM_SEEK((byte) 0x69), + + /** + * # of Next to BlobDB iterator. + */ + BLOB_DB_NUM_NEXT((byte) 0x6A), + + /** + * # of Prev to BlobDB iterator. + */ + BLOB_DB_NUM_PREV((byte) 0x6B), + + /** + * # of keys written to BlobDB. + */ + BLOB_DB_NUM_KEYS_WRITTEN((byte) 0x6C), + + /** + * # of keys read from BlobDB. + */ + BLOB_DB_NUM_KEYS_READ((byte) 0x6D), + + /** + * # of bytes (key + value) written to BlobDB. + */ + BLOB_DB_BYTES_WRITTEN((byte) 0x6E), + + /** + * # of bytes (keys + value) read from BlobDB. + */ + BLOB_DB_BYTES_READ((byte) 0x6F), + + /** + * # of keys written by BlobDB as non-TTL inlined value. + */ + BLOB_DB_WRITE_INLINED((byte) 0x70), + + /** + * # of keys written by BlobDB as TTL inlined value. + */ + BLOB_DB_WRITE_INLINED_TTL((byte) 0x71), + + /** + * # of keys written by BlobDB as non-TTL blob value. + */ + BLOB_DB_WRITE_BLOB((byte) 0x72), + + /** + * # of keys written by BlobDB as TTL blob value. + */ + BLOB_DB_WRITE_BLOB_TTL((byte) 0x73), + + /** + * # of bytes written to blob file. + */ + BLOB_DB_BLOB_FILE_BYTES_WRITTEN((byte) 0x74), + + /** + * # of bytes read from blob file. + */ + BLOB_DB_BLOB_FILE_BYTES_READ((byte) 0x75), + + /** + * # of times a blob file is synced. + */ + BLOB_DB_BLOB_FILE_SYNCED((byte) 0x76), + + /** + * # of blob indexes evicted from base DB by BlobDB compaction filter because + * of expiration. + */ + BLOB_DB_BLOB_INDEX_EXPIRED_COUNT((byte) 0x77), + + /** + * Size of blob indexes evicted from base DB by BlobDB compaction filter + * because of expiration. + */ + BLOB_DB_BLOB_INDEX_EXPIRED_SIZE((byte) 0x78), + + /** + * # of blob indexes evicted from base DB by BlobDB compaction filter because + * the corresponding file was deleted. + */ + BLOB_DB_BLOB_INDEX_EVICTED_COUNT((byte) 0x79), + + /** + * Size of blob indexes evicted from base DB by BlobDB compaction filter + * because the corresponding file was deleted. + */ + BLOB_DB_BLOB_INDEX_EVICTED_SIZE((byte) 0x7A), + + /** + * # of blob files being garbage collected. + */ + BLOB_DB_GC_NUM_FILES((byte) 0x7B), + + /** + * # of blob files generated by garbage collection. + */ + BLOB_DB_GC_NUM_NEW_FILES((byte) 0x7C), + + /** + * # of BlobDB garbage collection failures. + */ + BLOB_DB_GC_FAILURES((byte) 0x7D), + + /** + * # of keys dropped by BlobDB garbage collection because they had been + * overwritten. + */ + BLOB_DB_GC_NUM_KEYS_OVERWRITTEN((byte) 0x7E), + + /** + * # of keys dropped by BlobDB garbage collection because of expiration. + */ + BLOB_DB_GC_NUM_KEYS_EXPIRED((byte) 0x7F), + + /** + * # of keys relocated to new blob file by garbage collection.
+ */ + BLOB_DB_GC_NUM_KEYS_RELOCATED((byte) -0x02), + + /** + * # of bytes dropped by BlobDB garbage collection because they had been + * overwritten. + */ + BLOB_DB_GC_BYTES_OVERWRITTEN((byte) -0x03), + + /** + * # of bytes dropped by BlobDB garbage collection because of expiration. + */ + BLOB_DB_GC_BYTES_EXPIRED((byte) -0x04), + + /** + * # of bytes relocated to new blob file by garbage collection. + */ + BLOB_DB_GC_BYTES_RELOCATED((byte) -0x05), + + /** + * # of blob files evicted because BlobDB is full. + */ + BLOB_DB_FIFO_NUM_FILES_EVICTED((byte) -0x06), + + /** + * # of keys in the blob files evicted because BlobDB is full. + */ + BLOB_DB_FIFO_NUM_KEYS_EVICTED((byte) -0x07), + + /** + * # of bytes in the blob files evicted because BlobDB is full. + */ + BLOB_DB_FIFO_BYTES_EVICTED((byte) -0x08), + + /** + * These counters indicate a performance issue in WritePrepared transactions. + * We should not see them ticking much. + * # of times prepare_mutex_ is acquired in the fast path. + */ + TXN_PREPARE_MUTEX_OVERHEAD((byte) -0x09), + + /** + * # of times old_commit_map_mutex_ is acquired in the fast path. + */ + TXN_OLD_COMMIT_MAP_MUTEX_OVERHEAD((byte) -0x0A), + + /** + * # of times we checked a batch for duplicate keys. + */ + TXN_DUPLICATE_KEY_OVERHEAD((byte) -0x0B), + + /** + * # of times snapshot_mutex_ is acquired in the fast path. + */ + TXN_SNAPSHOT_MUTEX_OVERHEAD((byte) -0x0C), + + /** + * # of times ::Get returned TryAgain due to expired snapshot seq + */ + TXN_GET_TRY_AGAIN((byte) -0x0D), + + /** + * # of files marked as trash by delete scheduler + */ + FILES_MARKED_TRASH((byte) -0x0E), + + /** + * # of files deleted immediately by delete scheduler + */ + FILES_DELETED_IMMEDIATELY((byte) -0x0F), + + /** + * Compaction read and write statistics broken down by CompactionReason + */ + COMPACT_READ_BYTES_MARKED((byte) -0x10), + COMPACT_READ_BYTES_PERIODIC((byte) -0x11), + COMPACT_READ_BYTES_TTL((byte) -0x12), + COMPACT_WRITE_BYTES_MARKED((byte) -0x13), + COMPACT_WRITE_BYTES_PERIODIC((byte) -0x14), + COMPACT_WRITE_BYTES_TTL((byte) -0x15), + + /** + * DB error handler statistics + */ + ERROR_HANDLER_BG_ERROR_COUNT((byte) -0x16), + ERROR_HANDLER_BG_IO_ERROR_COUNT((byte) -0x17), + ERROR_HANDLER_BG_RETRYABLE_IO_ERROR_COUNT((byte) -0x18), + ERROR_HANDLER_AUTORESUME_COUNT((byte) -0x19), + ERROR_HANDLER_AUTORESUME_RETRY_TOTAL_COUNT((byte) -0x1A), + ERROR_HANDLER_AUTORESUME_SUCCESS_COUNT((byte) -0x1B), + + /** + * Bytes of raw data (payload) found on memtable at flush time. + * Contains the sum of garbage payload (bytes that are discarded + * at flush time) and useful payload (bytes of data that will + * eventually be written to SSTable). + */ + MEMTABLE_PAYLOAD_BYTES_AT_FLUSH((byte) -0x1C), + /** + * Outdated bytes of data present on memtable at flush time. + */ + MEMTABLE_GARBAGE_BYTES_AT_FLUSH((byte) -0x1D), + + /** + * Number of secondary cache hits + */ + SECONDARY_CACHE_HITS((byte) -0x1E), + + /** + * Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs.
+ */ + VERIFY_CHECKSUM_READ_BYTES((byte) -0x1F), + + /** + * Bytes read/written while creating backups + */ + BACKUP_READ_BYTES((byte) -0x20), + BACKUP_WRITE_BYTES((byte) -0x21), + + /** + * Remote compaction read/write statistics + */ + REMOTE_COMPACT_READ_BYTES((byte) -0x22), + REMOTE_COMPACT_WRITE_BYTES((byte) -0x23), + + /** + * Tiered storage related statistics + */ + HOT_FILE_READ_BYTES((byte) -0x24), + WARM_FILE_READ_BYTES((byte) -0x25), + COLD_FILE_READ_BYTES((byte) -0x26), + HOT_FILE_READ_COUNT((byte) -0x27), + WARM_FILE_READ_COUNT((byte) -0x28), + COLD_FILE_READ_COUNT((byte) -0x29), + + /** + * (non-)last level read statistics + */ + LAST_LEVEL_READ_BYTES((byte) -0x2A), + LAST_LEVEL_READ_COUNT((byte) -0x2B), + NON_LAST_LEVEL_READ_BYTES((byte) -0x2C), + NON_LAST_LEVEL_READ_COUNT((byte) -0x2D), + + BLOCK_CHECKSUM_COMPUTE_COUNT((byte) -0x2E), + + /** + * # of times cache miss when accessing blob from blob cache. + */ + BLOB_DB_CACHE_MISS((byte) -0x2F), + + /** + * # of times cache hit when accessing blob from blob cache. + */ + BLOB_DB_CACHE_HIT((byte) -0x30), + + /** + * # of data blocks added to blob cache. + */ + BLOB_DB_CACHE_ADD((byte) -0x31), + + /** + * # of failures when adding blobs to blob cache. + */ + BLOB_DB_CACHE_ADD_FAILURES((byte) -0x32), + + /** + * # of bytes read from blob cache. + */ + BLOB_DB_CACHE_BYTES_READ((byte) -0x33), + + /** + * # of bytes written into blob cache. + */ + BLOB_DB_CACHE_BYTES_WRITE((byte) -0x34), + + TICKER_ENUM_MAX((byte) 0x5F); + + private final byte value; + + TickerType(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumeration's value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get Ticker type by byte value. + * + * @param value byte representation of TickerType. + * + * @return {@link org.rocksdb.TickerType} instance. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static TickerType getTickerType(final byte value) { + for (final TickerType tickerType : TickerType.values()) { + if (tickerType.getValue() == value) { + return tickerType; + } + } + throw new IllegalArgumentException( + "Illegal value provided for TickerType."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java b/src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java new file mode 100644 index 000000000..dc8b5d6ef --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TimedEnv.java @@ -0,0 +1,30 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Timed environment. + */ +public class TimedEnv extends Env { + + /** + *

<p>Creates a new environment that measures function call times for + * filesystem operations, reporting results to variables in PerfContext.</p> + * + * + * <p>The caller must delete the result when it is + * no longer needed.</p>
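+ * + * <p>A minimal usage sketch, assuming the default Env; the path and option values are illustrative only: + * <pre>{@code + * try (final Env env = new TimedEnv(Env.getDefault()); + * final Options options = new Options().setCreateIfMissing(true).setEnv(env); + * final RocksDB db = RocksDB.open(options, "/tmp/timed-db")) { + * db.put("key".getBytes(), "value".getBytes()); // timings are reported into PerfContext + * } + * }</pre>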

+ * + * @param baseEnv the base environment, + * must remain live while the result is in use. + */ + public TimedEnv(final Env baseEnv) { + super(createTimedEnv(baseEnv.nativeHandle_)); + } + + private static native long createTimedEnv(final long baseEnvHandle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java new file mode 100644 index 000000000..cf5f7bbe1 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TraceOptions.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * TraceOptions is used for + * {@link RocksDB#startTrace(TraceOptions, AbstractTraceWriter)}. + */ +public class TraceOptions { + private final long maxTraceFileSize; + + public TraceOptions() { + this.maxTraceFileSize = 64L * 1024L * 1024L * 1024L; // 64 GB + } + + public TraceOptions(final long maxTraceFileSize) { + this.maxTraceFileSize = maxTraceFileSize; + } + + /** + * To prevent the trace file from growing larger than the available storage + * space, the user can set a maximum trace file size in bytes. The default is + * 64 GB. + * + * @return the max trace file size + */ + public long getMaxTraceFileSize() { + return maxTraceFileSize; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java b/src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java new file mode 100644 index 000000000..cb0234e9b --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TraceWriter.java @@ -0,0 +1,36 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * TraceWriter allows exporting RocksDB traces to any system, + * one operation at a time. + */ +public interface TraceWriter { + + /** + * Write the data. + * + * @param data the data + * + * @throws RocksDBException if an error occurs whilst writing. + */ + void write(final Slice data) throws RocksDBException; + + /** + * Close the writer. + * + * @throws RocksDBException if an error occurs whilst closing the writer. + */ + void closeWriter() throws RocksDBException; + + /** + * Get the size of the file that this writer is writing to. + * + * @return the file size + */ + long getFileSize(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java b/src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java new file mode 100644 index 000000000..b2cc8a932 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/Transaction.java @@ -0,0 +1,2170 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Provides BEGIN/COMMIT/ROLLBACK transactions.
+ * + * To use transactions, you must first create either an + * {@link OptimisticTransactionDB} or a {@link TransactionDB}. + * + * To create a transaction, use + * {@link OptimisticTransactionDB#beginTransaction(org.rocksdb.WriteOptions)} or + * {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)}. + * + * It is up to the caller to synchronize access to this object. + * + * See samples/src/main/java/OptimisticTransactionSample.java and + * samples/src/main/java/TransactionSample.java for some simple + * examples. + */ +public class Transaction extends RocksObject { + + private final RocksDB parent; + + /** + * Intentionally package private + * as this is called from + * {@link OptimisticTransactionDB#beginTransaction(org.rocksdb.WriteOptions)} + * or {@link TransactionDB#beginTransaction(org.rocksdb.WriteOptions)} + * + * @param parent This must be either {@link TransactionDB} or + * {@link OptimisticTransactionDB} + * @param transactionHandle The native handle to the underlying C++ + * transaction object + */ + Transaction(final RocksDB parent, final long transactionHandle) { + super(transactionHandle); + this.parent = parent; + } + + /** + * If a transaction has a snapshot set, the transaction will ensure that + * any keys successfully written (or fetched via {@link #getForUpdate}) have + * not been modified outside of this transaction since the time the snapshot + * was set. + * + * If a snapshot has not been set, the transaction guarantees that keys have + * not been modified since the time each key was first written (or fetched via + * {@link #getForUpdate}). + * + * Using {@link #setSnapshot()} will provide stricter isolation guarantees + * at the expense of potentially more transaction failures due to conflicts + * with other writes. + * + * Calling {@link #setSnapshot()} has no effect on keys written before this + * function has been called. + * + * {@link #setSnapshot()} may be called multiple times if you would like to + * change the snapshot used for different operations in this transaction. + * + * Calling {@link #setSnapshot()} will not affect the version of data returned + * by get(...) methods. See {@link #get} for more details. + */ + public void setSnapshot() { + assert(isOwningHandle()); + setSnapshot(nativeHandle_); + } + + /** + * Similar to {@link #setSnapshot()}, but will not change the current snapshot + * until put/merge/delete/getForUpdate/multiGetForUpdate is called. + * By calling this function, the transaction will essentially call + * {@link #setSnapshot()} for you right before performing the next + * write/getForUpdate. + * + * Calling {@link #setSnapshotOnNextOperation()} will not affect what + * snapshot is returned by {@link #getSnapshot} until the next + * write/getForUpdate is executed. + * + * When the snapshot is created the notifier's snapshotCreated method will + * be called so that the caller can get access to the snapshot. + * + * This is an optimization to reduce the likelihood of conflicts that + * could occur in between the time {@link #setSnapshot()} is called and the + * first write/getForUpdate operation. i.e. this prevents the following + * race-condition: + * + * txn1->setSnapshot(); + * txn2->put("A", ...); + * txn2->commit(); + * txn1->getForUpdate(opts, "A", ...); // FAIL!
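+ * + * <p>A hedged illustration of the same flow in Java (identifiers are illustrative only): because the snapshot is taken lazily, txn2's earlier commit no longer conflicts with txn1's read: + * <pre>{@code + * txn1.setSnapshotOnNextOperation(); + * txn2.put("A".getBytes(), "x".getBytes()); + * txn2.commit(); + * txn1.getForUpdate(readOptions, "A".getBytes(), true); // OK, snapshot set here + * }</pre>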
+ */ + public void setSnapshotOnNextOperation() { + assert(isOwningHandle()); + setSnapshotOnNextOperation(nativeHandle_); + } + + /** + * Similar to {@link #setSnapshot()}, but will not change the current snapshot + * until put/merge/delete/getForUpdate/multiGetForUpdate is called. + * By calling this function, the transaction will essentially call + * {@link #setSnapshot()} for you right before performing the next + * write/getForUpdate. + * + * Calling {@link #setSnapshotOnNextOperation()} will not affect what + * snapshot is returned by {@link #getSnapshot} until the next + * write/getForUpdate is executed. + * + * When the snapshot is created the + * {@link AbstractTransactionNotifier#snapshotCreated(Snapshot)} method will + * be called so that the caller can get access to the snapshot. + * + * This is an optimization to reduce the likelihood of conflicts that + * could occur in between the time {@link #setSnapshot()} is called and the + * first write/getForUpdate operation. i.e. this prevents the following + * race-condition: + * + * txn1->setSnapshot(); + * txn2->put("A", ...); + * txn2->commit(); + * txn1->getForUpdate(opts, "A", ...); // FAIL! + * + * @param transactionNotifier A handler for receiving snapshot notifications + * for the transaction + * + */ + public void setSnapshotOnNextOperation( + final AbstractTransactionNotifier transactionNotifier) { + assert(isOwningHandle()); + setSnapshotOnNextOperation(nativeHandle_, transactionNotifier.nativeHandle_); + } + + /** + * Returns the Snapshot created by the last call to {@link #setSnapshot()}. + * + * REQUIRED: The returned Snapshot is only valid up until the next time + * {@link #setSnapshot()}/{@link #setSnapshotOnNextOperation()} is called, + * {@link #clearSnapshot()} is called, or the Transaction is deleted. + * + * @return The snapshot or null if there is no snapshot + */ + public Snapshot getSnapshot() { + assert(isOwningHandle()); + final long snapshotNativeHandle = getSnapshot(nativeHandle_); + if(snapshotNativeHandle == 0) { + return null; + } else { + final Snapshot snapshot = new Snapshot(snapshotNativeHandle); + return snapshot; + } + } + + /** + * Clears the current snapshot (i.e. no snapshot will be 'set') + * + * This removes any snapshot that currently exists or is set to be created + * on the next update operation ({@link #setSnapshotOnNextOperation()}). + * + * Calling {@link #clearSnapshot()} has no effect on keys written before this + * function has been called. + * + * If a reference to a snapshot was retrieved via {@link #getSnapshot()}, it + * will no longer be valid and should be discarded after a call to + * {@link #clearSnapshot()}. + */ + public void clearSnapshot() { + assert(isOwningHandle()); + clearSnapshot(nativeHandle_); + } + + /** + * Prepare the current transaction for 2PC + * + * @throws RocksDBException if an error occurs when preparing the transaction + */ + public void prepare() throws RocksDBException { + //TODO(AR) consider a Java'ish version of this function, which returns an AutoCloseable (commit) + assert(isOwningHandle()); + prepare(nativeHandle_); + } + + /** + * Write all batched keys to the db atomically. + * + * Returns OK on success. + * + * May return any error status that could be returned by DB::Write(). + * + * If this transaction was created by an {@link OptimisticTransactionDB} + * Status::Busy() may be returned if the transaction could not guarantee + * that there are no write conflicts. Status::TryAgain() may be returned + * if the memtable history size is not large enough + * (See max_write_buffer_number_to_maintain).
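+ * + * <p>For example, a transaction from an {@link OptimisticTransactionDB} might simply be rolled back and retried when the commit reports a conflict (a simplified sketch; identifiers and the retry policy are illustrative): + * <pre>{@code + * try (final Transaction txn = optimisticDb.beginTransaction(writeOptions)) { + * txn.put("key".getBytes(), "value".getBytes()); + * try { + * txn.commit(); + * } catch (final RocksDBException e) { + * if (e.getStatus() != null && e.getStatus().getCode() == Status.Code.Busy) { + * txn.rollback(); // conflict detected at commit time; retry from the start + * } + * } + * } + * }</pre>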
+ * + * If this transaction was created by a {@link TransactionDB}, + * Status::Expired() may be returned if this transaction has lived for + * longer than {@link TransactionOptions#getExpiration()}. + * + * @throws RocksDBException if an error occurs when committing the transaction + */ + public void commit() throws RocksDBException { + assert(isOwningHandle()); + commit(nativeHandle_); + } + + /** + * Discard all batched writes in this transaction. + * + * @throws RocksDBException if an error occurs when rolling back the transaction + */ + public void rollback() throws RocksDBException { + assert(isOwningHandle()); + rollback(nativeHandle_); + } + + /** + * Records the state of the transaction for future calls to + * {@link #rollbackToSavePoint()}. + * + * May be called multiple times to set multiple save points. + * + * @throws RocksDBException if an error occurs whilst setting a save point + */ + public void setSavePoint() throws RocksDBException { + assert(isOwningHandle()); + setSavePoint(nativeHandle_); + } + + /** + * Undo all operations in this transaction (put, merge, delete, putLogData) + * since the most recent call to {@link #setSavePoint()} and removes the most + * recent {@link #setSavePoint()}. + * + * If there is no previous call to {@link #setSavePoint()}, + * returns Status::NotFound() + * + * @throws RocksDBException if an error occurs when rolling back to a save point + */ + public void rollbackToSavePoint() throws RocksDBException { + assert(isOwningHandle()); + rollbackToSavePoint(nativeHandle_); + } + + /** + * This function is similar to + * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. + * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance + * @param readOptions Read options. + * @param key the key to retrieve the value for. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions, final byte[] key) throws RocksDBException { + assert(isOwningHandle()); + return get(nativeHandle_, readOptions.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * This function is similar to + * {@link RocksDB#get(ReadOptions, byte[])} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. 
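+ * + * <p>For instance, a read through the transaction sees the transaction's own pending, uncommitted write (a simplified sketch; key and value are illustrative): + * <pre>{@code + * txn.put("k".getBytes(), "v".getBytes()); + * final byte[] value = txn.get(readOptions, "k".getBytes()); // returns "v" before commit + * }</pre>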
+ * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options. + * @param key the key to retrieve the value for. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying native + * library. + */ + public byte[] get(final ReadOptions readOptions, final byte[] key) + throws RocksDBException { + assert(isOwningHandle()); + return get(nativeHandle_, readOptions.nativeHandle_, key, key.length); + } + + /** + * This function is similar to + * {@link RocksDB#multiGetAsList} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. + * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options. + * @param columnFamilyHandles {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys the keys for which values need to be retrieved. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IllegalArgumentException thrown if the size of passed keys is not + * equal to the number of passed column family handles. + */ + @Deprecated + public byte[][] multiGet(final ReadOptions readOptions, + final List<ColumnFamilyHandle> columnFamilyHandles, final byte[][] keys) + throws RocksDBException { + assert(isOwningHandle()); + // Check if key size equals cfList size. If not, an exception must be + // thrown, otherwise a segmentation fault happens. + if (keys.length != columnFamilyHandles.size()) { + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + if(keys.length == 0) { + return new byte[0][0]; + } + final long[] cfHandles = new long[columnFamilyHandles.size()]; + for (int i = 0; i < columnFamilyHandles.size(); i++) { + cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_; + } + + return multiGet(nativeHandle_, readOptions.nativeHandle_, + keys, cfHandles); + } + + /** + * This function is similar to + * {@link RocksDB#multiGetAsList(ReadOptions, List, List)} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned.
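+ * + * <p>A minimal sketch (handles and keys are illustrative; both lists must have the same size): + * <pre>{@code + * final List<byte[]> values = txn.multiGetAsList(readOptions, + * Arrays.asList(cfHandle1, cfHandle2), + * Arrays.asList("k1".getBytes(), "k2".getBytes())); + * }</pre>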
+ * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options. + * @param columnFamilyHandles {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys the keys for which values need to be retrieved. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IllegalArgumentException thrown if the size of passed keys is not + * equal to the number of passed column family handles. + */ + + public List<byte[]> multiGetAsList(final ReadOptions readOptions, + final List<ColumnFamilyHandle> columnFamilyHandles, final List<byte[]> keys) + throws RocksDBException { + assert (isOwningHandle()); + // Check if key size equals cfList size. If not, an exception must be + // thrown, otherwise a segmentation fault happens. + if (keys.size() != columnFamilyHandles.size()) { + throw new IllegalArgumentException("For each key there must be a ColumnFamilyHandle."); + } + if (keys.size() == 0) { + return new ArrayList<>(0); + } + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + final long[] cfHandles = new long[columnFamilyHandles.size()]; + for (int i = 0; i < columnFamilyHandles.size(); i++) { + cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_; + } + + return Arrays.asList(multiGet(nativeHandle_, readOptions.nativeHandle_, keysArray, cfHandles)); + } + + /** + * This function is similar to + * {@link RocksDB#multiGetAsList} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned. + * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options. + * @param keys the keys for which values need to be retrieved. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + @Deprecated + public byte[][] multiGet(final ReadOptions readOptions, final byte[][] keys) + throws RocksDBException { + assert(isOwningHandle()); + if(keys.length == 0) { + return new byte[0][0]; + } + + return multiGet(nativeHandle_, readOptions.nativeHandle_, + keys); + } + + /** + * This function is similar to + * {@link RocksDB#multiGetAsList} except it will + * also read pending changes in this transaction. + * Currently, this function will return Status::MergeInProgress if the most + * recent write to the queried key in this batch is a Merge. + * + * If {@link ReadOptions#snapshot()} is not set, the current version of the + * key will be read. Calling {@link #setSnapshot()} does not affect the + * version of the data returned.
+ * + * Note that setting {@link ReadOptions#setSnapshot(Snapshot)} will affect + * what is read from the DB but will NOT change which keys are read from this + * transaction (the keys in this transaction do not yet belong to any snapshot + * and will be fetched regardless). + * + * @param readOptions Read options. + * @param keys the keys for which values need to be retrieved. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<byte[]> multiGetAsList(final ReadOptions readOptions, final List<byte[]> keys) + throws RocksDBException { + if (keys.size() == 0) { + return new ArrayList<>(0); + } + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + + return Arrays.asList(multiGet(nativeHandle_, readOptions.nativeHandle_, keysArray)); + } + + /** + * Read this key and ensure that this transaction will only + * be able to be committed if this key is not written outside this + * transaction after it has first been read (or after the snapshot if a + * snapshot is set in this transaction). The transaction behavior is the + * same regardless of whether the key exists or not. + * + * Note: Currently, this function will return Status::MergeInProgress + * if the most recent write to the queried key in this batch is a Merge. + * + * The values returned by this function are similar to + * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}. + * If value==nullptr, then this function will not read any data, but will + * still ensure that this key cannot be written to outside of this + * transaction. + * + * If this transaction was created by an {@link OptimisticTransactionDB}, + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)} + * could cause {@link #commit()} to fail. Otherwise, it could return any error + * that could be returned by + * {@link RocksDB#get(ColumnFamilyHandle, ReadOptions, byte[])}. + * + * If this transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * {@link Status.Code#MergeInProgress} if merge operations cannot be + * resolved. + * + * @param readOptions Read options. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value for. + * @param exclusive true if the transaction should have exclusive access to + * the key, otherwise false for shared access. + * @param doValidate true if it should validate the snapshot before doing the read + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library.
+ */ + public byte[] getForUpdate(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle, final byte[] key, final boolean exclusive, + final boolean doValidate) throws RocksDBException { + assert (isOwningHandle()); + return getForUpdate(nativeHandle_, readOptions.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_, exclusive, doValidate); + } + + /** + * Same as + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean, boolean)} + * with doValidate=true. + * + * @param readOptions Read options. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value for. + * @param exclusive true if the transaction should have exclusive access to + * the key, otherwise false for shared access. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] getForUpdate(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final boolean exclusive) throws RocksDBException { + assert(isOwningHandle()); + return getForUpdate(nativeHandle_, readOptions.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_, exclusive, true /*doValidate*/); + } + + /** + * Read this key and ensure that this transaction will only + * be able to be committed if this key is not written outside this + * transaction after it has first been read (or after the snapshot if a + * snapshot is set in this transaction). The transaction behavior is the + * same regardless of whether the key exists or not. + * + * Note: Currently, this function will return Status::MergeInProgress + * if the most recent write to the queried key in this batch is a Merge. + * + * The values returned by this function are similar to + * {@link RocksDB#get(ReadOptions, byte[])}. + * If value==nullptr, then this function will not read any data, but will + * still ensure that this key cannot be written to by outside of this + * transaction. + * + * If this transaction was created on an {@link OptimisticTransactionDB}, + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)} + * could cause {@link #commit()} to fail. Otherwise, it could return any error + * that could be returned by + * {@link RocksDB#get(ReadOptions, byte[])}. + * + * If this transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * {@link Status.Code#MergeInProgress} if merge operations cannot be + * resolved. + * + * @param readOptions Read options. + * @param key the key to retrieve the value for. + * @param exclusive true if the transaction should have exclusive access to + * the key, otherwise false for shared access. + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
+ */ + public byte[] getForUpdate(final ReadOptions readOptions, final byte[] key, + final boolean exclusive) throws RocksDBException { + assert(isOwningHandle()); + return getForUpdate( + nativeHandle_, readOptions.nativeHandle_, key, key.length, exclusive, true /*doValidate*/); + } + + /** + * A multi-key version of + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. + * + * + * @param readOptions Read options. + * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} + * instances + * @param keys the keys to retrieve the values for. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + @Deprecated + public byte[][] multiGetForUpdate(final ReadOptions readOptions, + final List<ColumnFamilyHandle> columnFamilyHandles, final byte[][] keys) + throws RocksDBException { + assert(isOwningHandle()); + // Check if key size equals cfList size. If not, an exception must be + // thrown, otherwise a segmentation fault happens. + if (keys.length != columnFamilyHandles.size()) { + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + if(keys.length == 0) { + return new byte[0][0]; + } + final long[] cfHandles = new long[columnFamilyHandles.size()]; + for (int i = 0; i < columnFamilyHandles.size(); i++) { + cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_; + } + return multiGetForUpdate(nativeHandle_, readOptions.nativeHandle_, + keys, cfHandles); + } + + /** + * A multi-key version of + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}. + * + * + * @param readOptions Read options. + * @param columnFamilyHandles {@link org.rocksdb.ColumnFamilyHandle} + * instances + * @param keys the keys to retrieve the values for. + * + * @return List of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<byte[]> multiGetForUpdateAsList(final ReadOptions readOptions, + final List<ColumnFamilyHandle> columnFamilyHandles, final List<byte[]> keys) + throws RocksDBException { + assert (isOwningHandle()); + // Check if key size equals cfList size. If not, an exception must be + // thrown, otherwise a segmentation fault happens. + if (keys.size() != columnFamilyHandles.size()) { + throw new IllegalArgumentException("For each key there must be a ColumnFamilyHandle."); + } + if (keys.size() == 0) { + return new ArrayList<>(); + } + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + + final long[] cfHandles = new long[columnFamilyHandles.size()]; + for (int i = 0; i < columnFamilyHandles.size(); i++) { + cfHandles[i] = columnFamilyHandles.get(i).nativeHandle_; + } + return Arrays.asList( + multiGetForUpdate(nativeHandle_, readOptions.nativeHandle_, keysArray, cfHandles)); + } + + /** + * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}. + * + * + * @param readOptions Read options. + * @param keys the keys to retrieve the values for. + * + * @return Array of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + @Deprecated + public byte[][] multiGetForUpdate(final ReadOptions readOptions, final byte[][] keys) + throws RocksDBException { + assert(isOwningHandle()); + if(keys.length == 0) { + return new byte[0][0]; + } + + return multiGetForUpdate(nativeHandle_, + readOptions.nativeHandle_, keys); + } + + /** + * A multi-key version of {@link #getForUpdate(ReadOptions, byte[], boolean)}.
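+ * + * <p>A hedged sketch that locks several keys in one call (keys and the surrounding transaction are illustrative): + * <pre>{@code + * final List<byte[]> values = txn.multiGetForUpdateAsList(readOptions, + * Arrays.asList("a".getBytes(), "b".getBytes())); + * }</pre>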
+ * + * + * @param readOptions Read options. + * @param keys the keys to retrieve the values for. + * + * @return List of values, one for each key + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<byte[]> multiGetForUpdateAsList( + final ReadOptions readOptions, final List<byte[]> keys) throws RocksDBException { + assert (isOwningHandle()); + if (keys.size() == 0) { + return new ArrayList<>(0); + } + + final byte[][] keysArray = keys.toArray(new byte[keys.size()][]); + + return Arrays.asList(multiGetForUpdate(nativeHandle_, readOptions.nativeHandle_, keysArray)); + } + + /** + * Returns an iterator that will iterate on all keys in the default + * column family including both keys in the DB and uncommitted keys in this + * transaction. + * + * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read + * from the DB but will NOT change which keys are read from this transaction + * (the keys in this transaction do not yet belong to any snapshot and will be + * fetched regardless). + * + * Caller is responsible for calling {@link RocksIterator#close()} on + * the returned Iterator. + * + * The returned iterator is only valid until {@link #commit()}, + * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called. + * + * @param readOptions Read options. + * + * @return instance of iterator object. + */ + public RocksIterator getIterator(final ReadOptions readOptions) { + assert(isOwningHandle()); + return new RocksIterator(parent, getIterator(nativeHandle_, + readOptions.nativeHandle_)); + } + + /** + * Returns an iterator that will iterate on all keys in the column family + * specified by {@code columnFamilyHandle} including both keys in the DB + * and uncommitted keys in this transaction. + * + * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is read + * from the DB but will NOT change which keys are read from this transaction + * (the keys in this transaction do not yet belong to any snapshot and will be + * fetched regardless). + * + * Caller is responsible for calling {@link RocksIterator#close()} on + * the returned Iterator. + * + * The returned iterator is only valid until {@link #commit()}, + * {@link #rollback()}, or {@link #rollbackToSavePoint()} is called. + * + * @param readOptions Read options. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * + * @return instance of iterator object. + */ + public RocksIterator getIterator(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle) { + assert(isOwningHandle()); + return new RocksIterator(parent, getIterator(nativeHandle_, + readOptions.nativeHandle_, columnFamilyHandle.nativeHandle_)); + } + + /** + * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])}, but + * will also perform conflict checking on the keys being written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + * + * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param columnFamilyHandle The column family to put the key/value into + * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key. + * @param assumeTracked true when it is expected that the key is already + * tracked. More specifically, it means the key was previously tracked + * in the same savepoint, with the same exclusive flag, and at a lower + * sequence number. If valid, it skips ValidateSnapshot and throws an + * error otherwise. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final byte[] value, final boolean assumeTracked) throws RocksDBException { + assert (isOwningHandle()); + put(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_, assumeTracked); + } + + /** + * Similar to {@link #put(ColumnFamilyHandle, byte[], byte[], boolean)} + * but with {@code assumeTracked = false}. + * + * Will also perform conflict checking on the keys being written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + * + * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param columnFamilyHandle The column family to put the key/value into + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final byte[] value) throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_, false); + } + + /** + * Similar to {@link RocksDB#put(byte[], byte[])}, but + * will also perform conflict checking on the keys being written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed. + * + * If this Transaction was created on a {@link TransactionDB}, an + * {@link RocksDBException} may be thrown with an accompanying {@link Status} + * when: + * {@link Status.Code#Busy} if there is a write conflict, + * {@link Status.Code#TimedOut} if a lock could not be acquired, + * {@link Status.Code#TryAgain} if the memtable history size is not large + * enough. See + * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()} + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key.
+ * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final byte[] key, final byte[] value) + throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, key, key.length, value, value.length); + } + + //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future + /** + * Similar to {@link #put(ColumnFamilyHandle, byte[], byte[])} but allows + * you to specify the key and value in several parts that will be + * concatenated together. + * + * @param columnFamilyHandle The column family to put the key/value into + * @param keyParts the specified key to be inserted. + * @param valueParts the value associated with the specified key. + * @param assumeTracked true when it is expected that the key is already + * tracked. More specifically, it means the key was previously tracked + * in the same savepoint, with the same exclusive flag, and at a lower + * sequence number. If valid, it skips ValidateSnapshot and throws an + * error otherwise. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final byte[][] keyParts, final byte[][] valueParts, + final boolean assumeTracked) throws RocksDBException { + assert (isOwningHandle()); + put(nativeHandle_, keyParts, keyParts.length, valueParts, valueParts.length, + columnFamilyHandle.nativeHandle_, assumeTracked); + } + + /** + * Similar to {@link #put(ColumnFamilyHandle, byte[][], byte[][], boolean)} + * but with {@code assumeTracked = false}. + * + * Allows you to specify the key and value in several parts that will be + * concatenated together. + * + * @param columnFamilyHandle The column family to put the key/value into + * @param keyParts the specified key to be inserted. + * @param valueParts the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final byte[][] keyParts, final byte[][] valueParts) + throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, keyParts, keyParts.length, valueParts, valueParts.length, + columnFamilyHandle.nativeHandle_, false); + } + + //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future + /** + * Similar to {@link #put(byte[], byte[])} but allows + * you to specify the key and value in several parts that will be + * concatenated together. + * + * @param keyParts the specified key to be inserted. + * @param valueParts the value associated with the specified key. + * + * @throws RocksDBException when one of the TransactionalDB conditions + * described above occurs, or in the case of an unexpected error + */ + public void put(final byte[][] keyParts, final byte[][] valueParts) + throws RocksDBException { + assert(isOwningHandle()); + put(nativeHandle_, keyParts, keyParts.length, valueParts, + valueParts.length); + } + + /** + * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])}, but + * will also perform conflict checking on the keys being written. + * + * If this Transaction was created on an {@link OptimisticTransactionDB}, + * these functions should always succeed.
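+ * + * <p>A minimal sketch, assuming the column family was opened with a merge operator such as {@link StringAppendOperator} (identifiers are illustrative): + * <pre>{@code + * txn.merge(cfHandle, "events".getBytes(), "login".getBytes(), false); + * txn.commit(); // the operand is combined with any existing value on read/compaction + * }</pre>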
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to merge the key/value into
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means the key was previously tracked
+ * in the same savepoint, with the same exclusive flag, and at a lower
+ * sequence number. If valid, ValidateSnapshot is skipped; otherwise an
+ * error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value, final boolean assumeTracked)
+ throws RocksDBException {
+ assert (isOwningHandle());
+ merge(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #merge(ColumnFamilyHandle, byte[], byte[], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to merge the key/value into
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void merge(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ assert(isOwningHandle());
+ merge(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(byte[], byte[])}, but
+ * will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void merge(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ merge(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])}, but
+ * will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means the key was previously tracked
+ * in the same savepoint, with the same exclusive flag, and at a lower
+ * sequence number. If valid, ValidateSnapshot is skipped; otherwise an
+ * error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final boolean assumeTracked) throws RocksDBException {
+ assert (isOwningHandle());
+ delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_,
+ assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #delete(ColumnFamilyHandle, byte[], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_,
+ /*assumeTracked*/ false);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(byte[])}, but
+ * will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
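+ *
+ * <p>For illustration, a hedged sketch of reacting to a write conflict
+ * ({@code txn} is assumed to come from a {@link TransactionDB}):
+ * <pre>{@code
+ *   try {
+ *     txn.delete("stale-key".getBytes());
+ *   } catch (final RocksDBException e) {
+ *     if (e.getStatus() != null
+ *         && e.getStatus().getCode() == Status.Code.Busy) {
+ *       txn.rollback();  // another transaction wrote this key first
+ *     }
+ *   }
+ * }</pre>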
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #delete(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means the key was previously tracked
+ * in the same savepoint, with the same exclusive flag, and at a lower
+ * sequence number. If valid, ValidateSnapshot is skipped; otherwise an
+ * error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final boolean assumeTracked)
+ throws RocksDBException {
+ assert (isOwningHandle());
+ delete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #delete(ColumnFamilyHandle, byte[][], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Allows you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #delete(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be deleted
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void delete(final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ delete(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#singleDelete(ColumnFamilyHandle, byte[])}, but
+ * will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
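+ *
+ * <p>A hedged sketch; {@code singleDelete} is only appropriate when the
+ * key has been {@code put} exactly once and never merged or overwritten:
+ * <pre>{@code
+ *   txn.put(cf, key, value);    // first and only write of this key
+ *   // ... later, in the same workload ...
+ *   txn.singleDelete(cf, key);  // removes that single version
+ * }</pre>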
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means the key was previously tracked
+ * in the same savepoint, with the same exclusive flag, and at a lower
+ * sequence number. If valid, ValidateSnapshot is skipped; otherwise an
+ * error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final boolean assumeTracked) throws RocksDBException {
+ assert (isOwningHandle());
+ singleDelete(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ /**
+ * Similar to {@link RocksDB#singleDelete(byte[])}, but
+ * will also perform conflict checking on the keys to be written.
+ *
+ * If this Transaction was created on an {@link OptimisticTransactionDB},
+ * these functions should always succeed.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, an
+ * {@link RocksDBException} may be thrown with an accompanying {@link Status}
+ * when:
+ * {@link Status.Code#Busy} if there is a write conflict,
+ * {@link Status.Code#TimedOut} if a lock could not be acquired,
+ * {@link Status.Code#TryAgain} if the memtable history size is not large
+ * enough. See
+ * {@link ColumnFamilyOptions#maxWriteBufferNumberToMaintain()}
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ * @param assumeTracked true when it is expected that the key is already
+ * tracked. More specifically, it means the key was previously tracked
+ * in the same savepoint, with the same exclusive flag, and at a lower
+ * sequence number. If valid, ValidateSnapshot is skipped; otherwise an
+ * error is thrown.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final boolean assumeTracked)
+ throws RocksDBException {
+ assert (isOwningHandle());
+ singleDelete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, assumeTracked);
+ }
+
+ /**
+ * Similar to {@link #singleDelete(ColumnFamilyHandle, byte[][], boolean)}
+ * but with {@code assumeTracked = false}.
+ *
+ * Allows you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_, false);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #singleDelete(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ @Experimental("Performance optimization for a very specific workload")
+ public void singleDelete(final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ singleDelete(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(ColumnFamilyHandle, byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #put(ColumnFamilyHandle, byte[], byte[])}, no conflict
+ * checking will be performed for this key.
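+ *
+ * <p>A minimal sketch (assumes access to {@code key} is already
+ * serialised elsewhere, so skipping conflict tracking is safe):
+ * <pre>{@code
+ *   txn.putUntracked(cf, key, value);  // no conflict checking for key
+ *   txn.commit();
+ * }</pre>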
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#put(byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #put(byte[], byte[])}, no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be inserted.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #putUntracked(ColumnFamilyHandle, byte[], byte[])} but
+ * allows you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to put the key/value into
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts, final byte[][] valueParts)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, keyParts, keyParts.length, valueParts,
+ valueParts.length, columnFamilyHandle.nativeHandle_);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #putUntracked(byte[], byte[])} but
+ * allows you to specify the key and value in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be inserted.
+ * @param valueParts the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void putUntracked(final byte[][] keyParts, final byte[][] valueParts)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ putUntracked(nativeHandle_, keyParts, keyParts.length, valueParts,
+ valueParts.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(ColumnFamilyHandle, byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #merge(ColumnFamilyHandle, byte[], byte[])}, no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to merge the key/value into
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void mergeUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key, final byte[] value) throws RocksDBException {
+ assert(isOwningHandle());
+ mergeUntracked(nativeHandle_, key, key.length, value, value.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#merge(byte[], byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #merge(byte[], byte[])}, no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be merged.
+ * @param value the value associated with the specified key.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void mergeUntracked(final byte[] key, final byte[] value)
+ throws RocksDBException {
+ assert(isOwningHandle());
+ mergeUntracked(nativeHandle_, key, key.length, value, value.length);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(ColumnFamilyHandle, byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #delete(ColumnFamilyHandle, byte[])}, no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, key, key.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ /**
+ * Similar to {@link RocksDB#delete(byte[])},
+ * but operates on the transaction's write batch. This write will only happen
+ * if this transaction gets committed successfully.
+ *
+ * Unlike {@link #delete(byte[])}, no conflict
+ * checking will be performed for this key.
+ *
+ * If this Transaction was created on a {@link TransactionDB}, this function
+ * will still acquire locks necessary to make sure this write doesn't cause
+ * conflicts in other transactions. This may cause a {@link RocksDBException}
+ * with associated {@link Status.Code#Busy}.
+ *
+ * @param key the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final byte[] key) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, key, key.length);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #deleteUntracked(ColumnFamilyHandle, byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param columnFamilyHandle The column family to delete the key/value from
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final ColumnFamilyHandle columnFamilyHandle,
+ final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, keyParts, keyParts.length,
+ columnFamilyHandle.nativeHandle_);
+ }
+
+ //TODO(AR) refactor if we implement org.rocksdb.SliceParts in future
+ /**
+ * Similar to {@link #deleteUntracked(byte[])} but allows
+ * you to specify the key in several parts that will be
+ * concatenated together.
+ *
+ * @param keyParts the specified key to be deleted.
+ *
+ * @throws RocksDBException when one of the TransactionalDB conditions
+ * described above occurs, or in the case of an unexpected error
+ */
+ public void deleteUntracked(final byte[][] keyParts) throws RocksDBException {
+ assert(isOwningHandle());
+ deleteUntracked(nativeHandle_, keyParts, keyParts.length);
+ }
+
+ /**
+ * Similar to {@link WriteBatch#putLogData(byte[])}
+ *
+ * @param blob binary object to be inserted
+ */
+ public void putLogData(final byte[] blob) {
+ assert(isOwningHandle());
+ putLogData(nativeHandle_, blob, blob.length);
+ }
+
+ /**
+ * By default, all put/merge/delete operations will be indexed in the
+ * transaction so that get/getForUpdate/getIterator can search for these
+ * keys.
+ *
+ * If the caller does not want to fetch the keys about to be written,
+ * they may want to avoid indexing as a performance optimization.
+ * Calling {@link #disableIndexing()} will turn off indexing for all future
+ * put/merge/delete operations until {@link #enableIndexing()} is called.
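+ *
+ * <p>A hedged sketch of bracketing a write-only bulk load ({@code rows}
+ * is a hypothetical collection of key/value pairs):
+ * <pre>{@code
+ *   txn.disableIndexing();    // skip write-batch indexing
+ *   for (final byte[][] kv : rows) {
+ *     txn.put(kv[0], kv[1]);  // not readable via get() before commit
+ *   }
+ *   txn.enableIndexing();
+ * }</pre>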
+ * + * If a key is put/merge/deleted after {@link #disableIndexing()} is called + * and then is fetched via get/getForUpdate/getIterator, the result of the + * fetch is undefined. + */ + public void disableIndexing() { + assert(isOwningHandle()); + disableIndexing(nativeHandle_); + } + + /** + * Re-enables indexing after a previous call to {@link #disableIndexing()} + */ + public void enableIndexing() { + assert(isOwningHandle()); + enableIndexing(nativeHandle_); + } + + /** + * Returns the number of distinct Keys being tracked by this transaction. + * If this transaction was created by a {@link TransactionDB}, this is the + * number of keys that are currently locked by this transaction. + * If this transaction was created by an {@link OptimisticTransactionDB}, + * this is the number of keys that need to be checked for conflicts at commit + * time. + * + * @return the number of distinct Keys being tracked by this transaction + */ + public long getNumKeys() { + assert(isOwningHandle()); + return getNumKeys(nativeHandle_); + } + + /** + * Returns the number of puts that have been applied to this + * transaction so far. + * + * @return the number of puts that have been applied to this transaction + */ + public long getNumPuts() { + assert(isOwningHandle()); + return getNumPuts(nativeHandle_); + } + + /** + * Returns the number of deletes that have been applied to this + * transaction so far. + * + * @return the number of deletes that have been applied to this transaction + */ + public long getNumDeletes() { + assert(isOwningHandle()); + return getNumDeletes(nativeHandle_); + } + + /** + * Returns the number of merges that have been applied to this + * transaction so far. + * + * @return the number of merges that have been applied to this transaction + */ + public long getNumMerges() { + assert(isOwningHandle()); + return getNumMerges(nativeHandle_); + } + + /** + * Returns the elapsed time in milliseconds since this Transaction began. + * + * @return the elapsed time in milliseconds since this transaction began. + */ + public long getElapsedTime() { + assert(isOwningHandle()); + return getElapsedTime(nativeHandle_); + } + + /** + * Fetch the underlying write batch that contains all pending changes to be + * committed. + * + * Note: You should not write or delete anything from the batch directly and + * should only use the functions in the {@link Transaction} class to + * write to this transaction. + * + * @return The write batch + */ + public WriteBatchWithIndex getWriteBatch() { + assert(isOwningHandle()); + final WriteBatchWithIndex writeBatchWithIndex = + new WriteBatchWithIndex(getWriteBatch(nativeHandle_)); + return writeBatchWithIndex; + } + + /** + * Change the value of {@link TransactionOptions#getLockTimeout()} + * (in milliseconds) for this transaction. + * + * Has no effect on OptimisticTransactions. + * + * @param lockTimeout the timeout (in milliseconds) for locks used by this + * transaction. + */ + public void setLockTimeout(final long lockTimeout) { + assert(isOwningHandle()); + setLockTimeout(nativeHandle_, lockTimeout); + } + + /** + * Return the WriteOptions that will be used during {@link #commit()}. + * + * @return the WriteOptions that will be used + */ + public WriteOptions getWriteOptions() { + assert(isOwningHandle()); + final WriteOptions writeOptions = + new WriteOptions(getWriteOptions(nativeHandle_)); + return writeOptions; + } + + /** + * Reset the WriteOptions that will be used during {@link #commit()}. 
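+ *
+ * <p>For example, a sketch that makes the commit of this transaction
+ * sync the WAL:
+ * <pre>{@code
+ *   try (final WriteOptions syncOpts = new WriteOptions().setSync(true)) {
+ *     txn.setWriteOptions(syncOpts);
+ *     txn.commit();
+ *   }
+ * }</pre>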
+ * + * @param writeOptions The new WriteOptions + */ + public void setWriteOptions(final WriteOptions writeOptions) { + assert(isOwningHandle()); + setWriteOptions(nativeHandle_, writeOptions.nativeHandle_); + } + + /** + * If this key was previously fetched in this transaction using + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling + * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will tell + * the transaction that it no longer needs to do any conflict checking + * for this key. + * + * If a key has been fetched N times via + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then + * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will only have an + * effect if it is also called N times. If this key has been written to in + * this transaction, {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} + * will have no effect. + * + * If {@link #setSavePoint()} has been called after the + * {@link #getForUpdate(ReadOptions, ColumnFamilyHandle, byte[], boolean)}, + * {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} will not have any + * effect. + * + * If this Transaction was created by an {@link OptimisticTransactionDB}, + * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} can affect + * whether this key is conflict checked at commit time. + * If this Transaction was created by a {@link TransactionDB}, + * calling {@link #undoGetForUpdate(ColumnFamilyHandle, byte[])} may release + * any held locks for this key. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value for. + */ + public void undoGetForUpdate(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) { + assert(isOwningHandle()); + undoGetForUpdate(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); + } + + /** + * If this key was previously fetched in this transaction using + * {@link #getForUpdate(ReadOptions, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, calling + * {@link #undoGetForUpdate(byte[])} will tell + * the transaction that it no longer needs to do any conflict checking + * for this key. + * + * If a key has been fetched N times via + * {@link #getForUpdate(ReadOptions, byte[], boolean)}/ + * {@link #multiGetForUpdate(ReadOptions, List, byte[][])}, then + * {@link #undoGetForUpdate(byte[])} will only have an + * effect if it is also called N times. If this key has been written to in + * this transaction, {@link #undoGetForUpdate(byte[])} + * will have no effect. + * + * If {@link #setSavePoint()} has been called after the + * {@link #getForUpdate(ReadOptions, byte[], boolean)}, + * {@link #undoGetForUpdate(byte[])} will not have any + * effect. + * + * If this Transaction was created by an {@link OptimisticTransactionDB}, + * calling {@link #undoGetForUpdate(byte[])} can affect + * whether this key is conflict checked at commit time. + * If this Transaction was created by a {@link TransactionDB}, + * calling {@link #undoGetForUpdate(byte[])} may release + * any held locks for this key. + * + * @param key the key to retrieve the value for. 
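+ *
+ * <p>A hedged sketch of releasing a speculatively-acquired lock
+ * ({@code txn}, {@code readOptions} and {@code needsUpdate} are
+ * illustrative assumptions):
+ * <pre>{@code
+ *   final byte[] v = txn.getForUpdate(readOptions, key, true);
+ *   if (!needsUpdate(v)) {
+ *     // no write will follow, so stop conflict checking this key
+ *     txn.undoGetForUpdate(key);
+ *   }
+ * }</pre>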
+ */ + public void undoGetForUpdate(final byte[] key) { + assert(isOwningHandle()); + undoGetForUpdate(nativeHandle_, key, key.length); + } + + /** + * Adds the keys from the WriteBatch to the transaction + * + * @param writeBatch The write batch to read from + * + * @throws RocksDBException if an error occurs whilst rebuilding from the + * write batch. + */ + public void rebuildFromWriteBatch(final WriteBatch writeBatch) + throws RocksDBException { + assert(isOwningHandle()); + rebuildFromWriteBatch(nativeHandle_, writeBatch.nativeHandle_); + } + + /** + * Get the Commit time Write Batch. + * + * @return the commit time write batch. + */ + public WriteBatch getCommitTimeWriteBatch() { + assert(isOwningHandle()); + final WriteBatch writeBatch = + new WriteBatch(getCommitTimeWriteBatch(nativeHandle_)); + return writeBatch; + } + + /** + * Set the log number. + * + * @param logNumber the log number + */ + public void setLogNumber(final long logNumber) { + assert(isOwningHandle()); + setLogNumber(nativeHandle_, logNumber); + } + + /** + * Get the log number. + * + * @return the log number + */ + public long getLogNumber() { + assert(isOwningHandle()); + return getLogNumber(nativeHandle_); + } + + /** + * Set the name of the transaction. + * + * @param transactionName the name of the transaction + * + * @throws RocksDBException if an error occurs when setting the transaction + * name. + */ + public void setName(final String transactionName) throws RocksDBException { + assert(isOwningHandle()); + setName(nativeHandle_, transactionName); + } + + /** + * Get the name of the transaction. + * + * @return the name of the transaction + */ + public String getName() { + assert(isOwningHandle()); + return getName(nativeHandle_); + } + + /** + * Get the ID of the transaction. + * + * @return the ID of the transaction. + */ + public long getID() { + assert(isOwningHandle()); + return getID(nativeHandle_); + } + + /** + * Determine if a deadlock has been detected. + * + * @return true if a deadlock has been detected. + */ + public boolean isDeadlockDetect() { + assert(isOwningHandle()); + return isDeadlockDetect(nativeHandle_); + } + + /** + * Get the list of waiting transactions. + * + * @return The list of waiting transactions. + */ + public WaitingTransactions getWaitingTxns() { + assert(isOwningHandle()); + return getWaitingTxns(nativeHandle_); + } + + /** + * Get the execution status of the transaction. + * + * NOTE: The execution status of an Optimistic Transaction + * never changes. This is only useful for non-optimistic transactions! + * + * @return The execution status of the transaction + */ + public TransactionState getState() { + assert(isOwningHandle()); + return TransactionState.getTransactionState( + getState(nativeHandle_)); + } + + /** + * The globally unique id with which the transaction is identified. This id + * might or might not be set depending on the implementation. Similarly the + * implementation decides the point in lifetime of a transaction at which it + * assigns the id. Although currently it is the case, the id is not guaranteed + * to remain the same across restarts. + * + * @return the transaction id. 
+ */
+ @Experimental("NOTE: Experimental feature")
+ public long getId() {
+ assert(isOwningHandle());
+ return getId(nativeHandle_);
+ }
+
+ public enum TransactionState {
+ STARTED((byte)0),
+ AWAITING_PREPARE((byte)1),
+ PREPARED((byte)2),
+ AWAITING_COMMIT((byte)3),
+ COMMITTED((byte)4),
+ AWAITING_ROLLBACK((byte)5),
+ ROLLEDBACK((byte)6),
+ LOCKS_STOLEN((byte)7);
+
+ /*
+ * Keep old misspelled variable as alias
+ * Tip from https://stackoverflow.com/a/37092410/454544
+ */
+ public static final TransactionState COMMITED = COMMITTED;
+
+ private final byte value;
+
+ TransactionState(final byte value) {
+ this.value = value;
+ }
+
+ /**
+ * Get TransactionState by byte value.
+ *
+ * @param value byte representation of TransactionState.
+ *
+ * @return {@link org.rocksdb.Transaction.TransactionState} instance.
+ * @throws java.lang.IllegalArgumentException if an invalid
+ * value is provided.
+ */
+ public static TransactionState getTransactionState(final byte value) {
+ for (final TransactionState transactionState : TransactionState.values()) {
+ if (transactionState.value == value) {
+ return transactionState;
+ }
+ }
+ throw new IllegalArgumentException(
+ "Illegal value provided for TransactionState.");
+ }
+ }
+
+ /**
+ * Called from C++ native method {@link #getWaitingTxns(long)}
+ * to construct a WaitingTransactions object.
+ *
+ * @param columnFamilyId The id of the {@link ColumnFamilyHandle}
+ * @param key The key
+ * @param transactionIds The transaction ids
+ *
+ * @return The waiting transactions
+ */
+ private WaitingTransactions newWaitingTransactions(
+ final long columnFamilyId, final String key,
+ final long[] transactionIds) {
+ return new WaitingTransactions(columnFamilyId, key, transactionIds);
+ }
+
+ public static class WaitingTransactions {
+ private final long columnFamilyId;
+ private final String key;
+ private final long[] transactionIds;
+
+ private WaitingTransactions(final long columnFamilyId, final String key,
+ final long[] transactionIds) {
+ this.columnFamilyId = columnFamilyId;
+ this.key = key;
+ this.transactionIds = transactionIds;
+ }
+
+ /**
+ * Get the Column Family ID.
+ *
+ * @return The column family ID
+ */
+ public long getColumnFamilyId() {
+ return columnFamilyId;
+ }
+
+ /**
+ * Get the key on which the transactions are waiting.
+ *
+ * @return The key
+ */
+ public String getKey() {
+ return key;
+ }
+
+ /**
+ * Get the IDs of the waiting transactions.
+ * + * @return The IDs of the waiting transactions + */ + public long[] getTransactionIds() { + return transactionIds; + } + } + + private native void setSnapshot(final long handle); + private native void setSnapshotOnNextOperation(final long handle); + private native void setSnapshotOnNextOperation(final long handle, + final long transactionNotifierHandle); + private native long getSnapshot(final long handle); + private native void clearSnapshot(final long handle); + private native void prepare(final long handle) throws RocksDBException; + private native void commit(final long handle) throws RocksDBException; + private native void rollback(final long handle) throws RocksDBException; + private native void setSavePoint(final long handle) throws RocksDBException; + private native void rollbackToSavePoint(final long handle) + throws RocksDBException; + private native byte[] get(final long handle, final long readOptionsHandle, + final byte key[], final int keyLength, final long columnFamilyHandle) + throws RocksDBException; + private native byte[] get(final long handle, final long readOptionsHandle, + final byte key[], final int keyLen) throws RocksDBException; + private native byte[][] multiGet(final long handle, + final long readOptionsHandle, final byte[][] keys, + final long[] columnFamilyHandles) throws RocksDBException; + private native byte[][] multiGet(final long handle, + final long readOptionsHandle, final byte[][] keys) + throws RocksDBException; + private native byte[] getForUpdate(final long handle, final long readOptionsHandle, + final byte key[], final int keyLength, final long columnFamilyHandle, final boolean exclusive, + final boolean doValidate) throws RocksDBException; + private native byte[] getForUpdate(final long handle, final long readOptionsHandle, + final byte key[], final int keyLen, final boolean exclusive, final boolean doValidate) + throws RocksDBException; + private native byte[][] multiGetForUpdate(final long handle, + final long readOptionsHandle, final byte[][] keys, + final long[] columnFamilyHandles) throws RocksDBException; + private native byte[][] multiGetForUpdate(final long handle, + final long readOptionsHandle, final byte[][] keys) + throws RocksDBException; + private native long getIterator(final long handle, + final long readOptionsHandle); + private native long getIterator(final long handle, + final long readOptionsHandle, final long columnFamilyHandle); + private native void put(final long handle, final byte[] key, final int keyLength, + final byte[] value, final int valueLength, final long columnFamilyHandle, + final boolean assumeTracked) throws RocksDBException; + private native void put(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + throws RocksDBException; + private native void put(final long handle, final byte[][] keys, final int keysLength, + final byte[][] values, final int valuesLength, final long columnFamilyHandle, + final boolean assumeTracked) throws RocksDBException; + private native void put(final long handle, final byte[][] keys, + final int keysLength, final byte[][] values, final int valuesLength) + throws RocksDBException; + private native void merge(final long handle, final byte[] key, final int keyLength, + final byte[] value, final int valueLength, final long columnFamilyHandle, + final boolean assumeTracked) throws RocksDBException; + private native void merge(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + 
throws RocksDBException; + private native void delete(final long handle, final byte[] key, final int keyLength, + final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException; + private native void delete(final long handle, final byte[] key, + final int keyLength) throws RocksDBException; + private native void delete(final long handle, final byte[][] keys, final int keysLength, + final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException; + private native void delete(final long handle, final byte[][] keys, + final int keysLength) throws RocksDBException; + private native void singleDelete(final long handle, final byte[] key, final int keyLength, + final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException; + private native void singleDelete(final long handle, final byte[] key, + final int keyLength) throws RocksDBException; + private native void singleDelete(final long handle, final byte[][] keys, final int keysLength, + final long columnFamilyHandle, final boolean assumeTracked) throws RocksDBException; + private native void singleDelete(final long handle, final byte[][] keys, + final int keysLength) throws RocksDBException; + private native void putUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength, + final long columnFamilyHandle) throws RocksDBException; + private native void putUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + throws RocksDBException; + private native void putUntracked(final long handle, final byte[][] keys, + final int keysLength, final byte[][] values, final int valuesLength, + final long columnFamilyHandle) throws RocksDBException; + private native void putUntracked(final long handle, final byte[][] keys, + final int keysLength, final byte[][] values, final int valuesLength) + throws RocksDBException; + private native void mergeUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength, + final long columnFamilyHandle) throws RocksDBException; + private native void mergeUntracked(final long handle, final byte[] key, + final int keyLength, final byte[] value, final int valueLength) + throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[] key, + final int keyLength, final long columnFamilyHandle) + throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[] key, + final int keyLength) throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[][] keys, + final int keysLength, final long columnFamilyHandle) + throws RocksDBException; + private native void deleteUntracked(final long handle, final byte[][] keys, + final int keysLength) throws RocksDBException; + private native void putLogData(final long handle, final byte[] blob, + final int blobLength); + private native void disableIndexing(final long handle); + private native void enableIndexing(final long handle); + private native long getNumKeys(final long handle); + private native long getNumPuts(final long handle); + private native long getNumDeletes(final long handle); + private native long getNumMerges(final long handle); + private native long getElapsedTime(final long handle); + private native long getWriteBatch(final long handle); + private native void setLockTimeout(final long handle, final long lockTimeout); + private native long getWriteOptions(final 
long handle);
+ private native void setWriteOptions(final long handle,
+ final long writeOptionsHandle);
+ private native void undoGetForUpdate(final long handle, final byte[] key,
+ final int keyLength, final long columnFamilyHandle);
+ private native void undoGetForUpdate(final long handle, final byte[] key,
+ final int keyLength);
+ private native void rebuildFromWriteBatch(final long handle,
+ final long writeBatchHandle) throws RocksDBException;
+ private native long getCommitTimeWriteBatch(final long handle);
+ private native void setLogNumber(final long handle, final long logNumber);
+ private native long getLogNumber(final long handle);
+ private native void setName(final long handle, final String name)
+ throws RocksDBException;
+ private native String getName(final long handle);
+ private native long getID(final long handle);
+ private native boolean isDeadlockDetect(final long handle);
+ private native WaitingTransactions getWaitingTxns(final long handle);
+ private native byte getState(final long handle);
+ private native long getId(final long handle);
+
+ @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java
new file mode 100644
index 000000000..86f25fe15
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDB.java
@@ -0,0 +1,403 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Database with Transaction support
+ */
+public class TransactionDB extends RocksDB
+ implements TransactionalDB<TransactionOptions> {
+
+ private TransactionDBOptions transactionDbOptions_;
+
+ /**
+ * Private constructor.
+ *
+ * @param nativeHandle The native handle of the C++ TransactionDB object
+ */
+ private TransactionDB(final long nativeHandle) {
+ super(nativeHandle);
+ }
+
+ /**
+ * Open a TransactionDB, similar to {@link RocksDB#open(Options, String)}.
+ *
+ * @param options {@link org.rocksdb.Options} instance.
+ * @param transactionDbOptions {@link org.rocksdb.TransactionDBOptions}
+ * instance.
+ * @param path the path to the rocksdb.
+ *
+ * @return a {@link TransactionDB} instance on success, null if the specified
+ * {@link TransactionDB} cannot be opened.
+ *
+ * @throws RocksDBException if an error occurs whilst opening the database.
+ */
+ public static TransactionDB open(final Options options,
+ final TransactionDBOptions transactionDbOptions, final String path)
+ throws RocksDBException {
+ final TransactionDB tdb = new TransactionDB(open(options.nativeHandle_,
+ transactionDbOptions.nativeHandle_, path));
+
+ // when non-default Options is used, keeping an Options reference
+ // in RocksDB prevents the Options from being garbage collected by
+ // Java for the lifetime of the currently-created RocksDB.
+ tdb.storeOptionsInstance(options);
+ tdb.storeTransactionDbOptions(transactionDbOptions);
+
+ return tdb;
+ }
+
+ /**
+ * Open a TransactionDB, similar to
+ * {@link RocksDB#open(DBOptions, String, List, List)}.
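+ *
+ * <p>A hedged opening sketch (path, options, and imports are
+ * illustrative assumptions, not part of this API):
+ * <pre>{@code
+ *   final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+ *   try (final DBOptions dbOpts = new DBOptions().setCreateIfMissing(true);
+ *        final TransactionDBOptions txnDbOpts = new TransactionDBOptions();
+ *        final TransactionDB txnDb = TransactionDB.open(dbOpts, txnDbOpts,
+ *            "/tmp/txn_db", Arrays.asList(new ColumnFamilyDescriptor(
+ *                RocksDB.DEFAULT_COLUMN_FAMILY)), cfHandles)) {
+ *     // use txnDb, then close the returned cfHandles before txnDb closes
+ *   }
+ * }</pre>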
+ *
+ * @param dbOptions {@link org.rocksdb.DBOptions} instance.
+ * @param transactionDbOptions {@link org.rocksdb.TransactionDBOptions}
+ * instance.
+ * @param path the path to the rocksdb.
+ * @param columnFamilyDescriptors list of column family descriptors
+ * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+ *
+ * @return a {@link TransactionDB} instance on success, null if the specified
+ * {@link TransactionDB} cannot be opened.
+ *
+ * @throws RocksDBException if an error occurs whilst opening the database.
+ */
+ public static TransactionDB open(final DBOptions dbOptions,
+ final TransactionDBOptions transactionDbOptions,
+ final String path,
+ final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+ final List<ColumnFamilyHandle> columnFamilyHandles)
+ throws RocksDBException {
+
+ final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+ final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+ for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+ final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors
+ .get(i);
+ cfNames[i] = cfDescriptor.getName();
+ cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
+ }
+
+ final long[] handles = open(dbOptions.nativeHandle_,
+ transactionDbOptions.nativeHandle_, path, cfNames, cfOptionHandles);
+ final TransactionDB tdb = new TransactionDB(handles[0]);
+
+ // when non-default Options is used, keeping an Options reference
+ // in RocksDB prevents the Options from being garbage collected by
+ // Java for the lifetime of the currently-created RocksDB.
+ tdb.storeOptionsInstance(dbOptions);
+ tdb.storeTransactionDbOptions(transactionDbOptions);
+
+ for (int i = 1; i < handles.length; i++) {
+ columnFamilyHandles.add(new ColumnFamilyHandle(tdb, handles[i]));
+ }
+
+ return tdb;
+ }
+
+ /**
+ * This is similar to {@link #close()} except that it
+ * throws an exception if any error occurs.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ *
+ * @throws RocksDBException if an error occurs whilst closing.
+ */
+ public void closeE() throws RocksDBException {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ /**
+ * This is similar to {@link #closeE()} except that it
+ * silently ignores any errors.
+ *
+ * This will not fsync the WAL files.
+ * If syncing is required, the caller must first call {@link #syncWal()}
+ * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch
+ * with {@link WriteOptions#setSync(boolean)} set to true.
+ *
+ * See also {@link #close()}.
+ */
+ @Override
+ public void close() {
+ if (owningHandle_.compareAndSet(true, false)) {
+ try {
+ closeDatabase(nativeHandle_);
+ } catch (final RocksDBException e) {
+ // silently ignore the error report
+ } finally {
+ disposeInternal();
+ }
+ }
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions) {
+ return new Transaction(this, beginTransaction(nativeHandle_,
+ writeOptions.nativeHandle_));
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final TransactionOptions transactionOptions) {
+ return new Transaction(this, beginTransaction(nativeHandle_,
+ writeOptions.nativeHandle_, transactionOptions.nativeHandle_));
+ }
+
+ // TODO(AR) consider having beginTransaction(... oldTransaction) set a
+ // reference count inside Transaction, so that we can always call
+ // Transaction#close but the object is only disposed when there are as many
+ // closes as beginTransaction. Makes the try-with-resources paradigm easier for
+ // java developers
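+
+ /**
+ * Begins a new transaction, recycling {@code oldTransaction} rather than
+ * allocating a new object (see the reuse assertion in the implementation
+ * below). A hedged usage sketch:
+ * <pre>{@code
+ *   Transaction txn = txnDb.beginTransaction(writeOptions);
+ *   txn.put(key, value);
+ *   txn.commit();
+ *   // reuse the same object for a fresh transaction
+ *   txn = txnDb.beginTransaction(writeOptions, txn);
+ * }</pre>
+ */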
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final Transaction oldTransaction) {
+ final long jtxnHandle = beginTransaction_withOld(nativeHandle_,
+ writeOptions.nativeHandle_, oldTransaction.nativeHandle_);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(jtxnHandle == oldTransaction.nativeHandle_);
+
+ return oldTransaction;
+ }
+
+ @Override
+ public Transaction beginTransaction(final WriteOptions writeOptions,
+ final TransactionOptions transactionOptions,
+ final Transaction oldTransaction) {
+ final long jtxnHandle = beginTransaction_withOld(nativeHandle_,
+ writeOptions.nativeHandle_, transactionOptions.nativeHandle_,
+ oldTransaction.nativeHandle_);
+
+ // RocksJava relies on the assumption that
+ // we do not allocate a new Transaction object
+ // when providing an old_txn
+ assert(jtxnHandle == oldTransaction.nativeHandle_);
+
+ return oldTransaction;
+ }
+
+ public Transaction getTransactionByName(final String transactionName) {
+ final long jtxnHandle = getTransactionByName(nativeHandle_, transactionName);
+ if (jtxnHandle == 0) {
+ return null;
+ }
+
+ final Transaction txn = new Transaction(this, jtxnHandle);
+
+ // this instance doesn't own the underlying C++ object
+ txn.disOwnNativeHandle();
+
+ return txn;
+ }
+
+ public List<Transaction> getAllPreparedTransactions() {
+ final long[] jtxnHandles = getAllPreparedTransactions(nativeHandle_);
+
+ final List<Transaction> txns = new ArrayList<>();
+ for (final long jtxnHandle : jtxnHandles) {
+ final Transaction txn = new Transaction(this, jtxnHandle);
+
+ // this instance doesn't own the underlying C++ object
+ txn.disOwnNativeHandle();
+
+ txns.add(txn);
+ }
+ return txns;
+ }
+
+ public static class KeyLockInfo {
+ private final String key;
+ private final long[] transactionIDs;
+ private final boolean exclusive;
+
+ public KeyLockInfo(final String key, final long[] transactionIDs,
+ final boolean exclusive) {
+ this.key = key;
+ this.transactionIDs = transactionIDs;
+ this.exclusive = exclusive;
+ }
+
+ /**
+ * Get the key.
+ *
+ * @return the key
+ */
+ public String getKey() {
+ return key;
+ }
+
+ /**
+ * Get the Transaction IDs.
+ *
+ * @return the Transaction IDs.
+ */
+ public long[] getTransactionIDs() {
+ return transactionIDs;
+ }
+
+ /**
+ * Get the Lock status.
+ *
+ * @return true if the lock is exclusive, false if the lock is shared.
+ */
+ public boolean isExclusive() {
+ return exclusive;
+ }
+ }
+
+ /**
+ * Returns a map of all the locks held.
+ *
+ * @return a map of all the locks held.
+ */
+ public Map<Long, KeyLockInfo> getLockStatusData() {
+ return getLockStatusData(nativeHandle_);
+ }
+
+ /**
+ * Called from C++ native method {@link #getDeadlockInfoBuffer(long)}
+ * to construct a DeadlockInfo object.
+ *
+ * @param transactionID The transaction id
+ * @param columnFamilyId The id of the {@link ColumnFamilyHandle}
+ * @param waitingKey the key that we are waiting on
+ * @param exclusive true if the lock is exclusive, false if the lock is shared
+ *
+ * @return The constructed DeadlockInfo
+ */
+ private DeadlockInfo newDeadlockInfo(
+ final long transactionID, final long columnFamilyId,
+ final String waitingKey, final boolean exclusive) {
+ return new DeadlockInfo(transactionID, columnFamilyId,
+ waitingKey, exclusive);
+ }
+
+ public static class DeadlockInfo {
+ private final long transactionID;
+ private final long columnFamilyId;
+ private final String waitingKey;
+ private final boolean exclusive;
+
+ private DeadlockInfo(final long transactionID, final long columnFamilyId,
+ final String waitingKey, final boolean exclusive) {
+ this.transactionID = transactionID;
+ this.columnFamilyId = columnFamilyId;
+ this.waitingKey = waitingKey;
+ this.exclusive = exclusive;
+ }
+
+ /**
+ * Get the Transaction ID.
+ *
+ * @return the transaction ID
+ */
+ public long getTransactionID() {
+ return transactionID;
+ }
+
+ /**
+ * Get the Column Family ID.
+ *
+ * @return The column family ID
+ */
+ public long getColumnFamilyId() {
+ return columnFamilyId;
+ }
+
+ /**
+ * Get the key that we are waiting on.
+ *
+ * @return the key that we are waiting on
+ */
+ public String getWaitingKey() {
+ return waitingKey;
+ }
+
+ /**
+ * Get the Lock status.
+ *
+ * @return true if the lock is exclusive, false if the lock is shared.
+ */
+ public boolean isExclusive() {
+ return exclusive;
+ }
+ }
+
+ public static class DeadlockPath {
+ final DeadlockInfo[] path;
+ final boolean limitExceeded;
+
+ public DeadlockPath(final DeadlockInfo[] path, final boolean limitExceeded) {
+ this.path = path;
+ this.limitExceeded = limitExceeded;
+ }
+
+ public boolean isEmpty() {
+ return path.length == 0 && !limitExceeded;
+ }
+ }
+
+ public DeadlockPath[] getDeadlockInfoBuffer() {
+ return getDeadlockInfoBuffer(nativeHandle_);
+ }
+
+ public void setDeadlockInfoBufferSize(final int targetSize) {
+ setDeadlockInfoBufferSize(nativeHandle_, targetSize);
+ }
+
+ private void storeTransactionDbOptions(
+ final TransactionDBOptions transactionDbOptions) {
+ this.transactionDbOptions_ = transactionDbOptions;
+ }
+
+ @Override protected final native void disposeInternal(final long handle);
+
+ private static native long open(final long optionsHandle,
+ final long transactionDbOptionsHandle, final String path)
+ throws RocksDBException;
+ private static native long[] open(final long dbOptionsHandle,
+ final long transactionDbOptionsHandle, final String path,
+ final byte[][] columnFamilyNames, final long[] columnFamilyOptions);
+ private static native void closeDatabase(final long handle)
+ throws RocksDBException;
+ private native long beginTransaction(final long handle,
+ final long writeOptionsHandle);
+ private native long beginTransaction(final long handle,
+ final long writeOptionsHandle, final long transactionOptionsHandle);
+ private native long beginTransaction_withOld(final long handle,
+ final long writeOptionsHandle, final long oldTransactionHandle);
+ private native long beginTransaction_withOld(final long handle,
+ final long writeOptionsHandle, final long transactionOptionsHandle,
+ final long oldTransactionHandle);
+ private native long getTransactionByName(final long handle,
+ final String name);
+ private native long[] getAllPreparedTransactions(final long handle);
+ private native Map<Long, KeyLockInfo> getLockStatusData(
final long handle); + private native DeadlockPath[] getDeadlockInfoBuffer(final long handle); + private native void setDeadlockInfoBufferSize(final long handle, + final int targetSize); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java new file mode 100644 index 000000000..7f4296a7c --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionDBOptions.java @@ -0,0 +1,217 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class TransactionDBOptions extends RocksObject { + + public TransactionDBOptions() { + super(newTransactionDBOptions()); + } + + /** + * Specifies the maximum number of keys that can be locked at the same time + * per column family. + * + * If the number of locked keys is greater than {@link #getMaxNumLocks()}, + * transaction writes (or GetForUpdate) will return an error. + * + * @return The maximum number of keys that can be locked + */ + public long getMaxNumLocks() { + assert(isOwningHandle()); + return getMaxNumLocks(nativeHandle_); + } + + /** + * Specifies the maximum number of keys that can be locked at the same time + * per column family. + * + * If the number of locked keys is greater than {@link #getMaxNumLocks()}, + * transaction writes (or GetForUpdate) will return an error. + * + * @param maxNumLocks The maximum number of keys that can be locked; + * If this value is not positive, no limit will be enforced. + * + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setMaxNumLocks(final long maxNumLocks) { + assert(isOwningHandle()); + setMaxNumLocks(nativeHandle_, maxNumLocks); + return this; + } + + /** + * The number of sub-tables per lock table (per column family) + * + * @return The number of sub-tables + */ + public long getNumStripes() { + assert(isOwningHandle()); + return getNumStripes(nativeHandle_); + } + + /** + * Increasing this value will increase the concurrency by dividing the lock + * table (per column family) into more sub-tables, each with their own + * separate mutex. + * + * Default: 16 + * + * @param numStripes The number of sub-tables + * + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setNumStripes(final long numStripes) { + assert(isOwningHandle()); + setNumStripes(nativeHandle_, numStripes); + return this; + } + + /** + * The default wait timeout in milliseconds when + * a transaction attempts to lock a key if not specified by + * {@link TransactionOptions#setLockTimeout(long)} + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout. + * + * @return the default wait timeout in milliseconds + */ + public long getTransactionLockTimeout() { + assert(isOwningHandle()); + return getTransactionLockTimeout(nativeHandle_); + } + + /** + * If positive, specifies the default wait timeout in milliseconds when + * a transaction attempts to lock a key if not specified by + * {@link TransactionOptions#setLockTimeout(long)} + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout. Not using a timeout is not recommended + * as it can lead to deadlocks. Currently, there is no deadlock-detection to + * recover from a deadlock. 
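+   *
+   * <p>For illustration only, a minimal sketch of passing these options when
+   * opening a {@link TransactionDB}; the path is a placeholder:</p>
+   * <pre>{@code
+   * try (Options options = new Options().setCreateIfMissing(true);
+   *      TransactionDBOptions txnDbOptions = new TransactionDBOptions()
+   *          .setTransactionLockTimeout(1000);
+   *      TransactionDB txnDb =
+   *          TransactionDB.open(options, txnDbOptions, "/tmp/txn_db")) {
+   *   // use txnDb.beginTransaction(...)
+   * }
+   * }</pre>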
+ * + * Default: 1000 + * + * @param transactionLockTimeout the default wait timeout in milliseconds + * + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setTransactionLockTimeout( + final long transactionLockTimeout) { + assert(isOwningHandle()); + setTransactionLockTimeout(nativeHandle_, transactionLockTimeout); + return this; + } + + /** + * The wait timeout in milliseconds when writing a key + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, + * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} + * directly). + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout and will block indefinitely when acquiring + * a lock. + * + * @return the timeout in milliseconds when writing a key OUTSIDE of a + * transaction + */ + public long getDefaultLockTimeout() { + assert(isOwningHandle()); + return getDefaultLockTimeout(nativeHandle_); + } + + /** + * If positive, specifies the wait timeout in milliseconds when writing a key + * OUTSIDE of a transaction (ie by calling {@link RocksDB#put}, + * {@link RocksDB#merge}, {@link RocksDB#delete} or {@link RocksDB#write} + * directly). + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, there is no timeout and will block indefinitely when acquiring + * a lock. + * + * Not using a timeout can lead to deadlocks. Currently, there + * is no deadlock-detection to recover from a deadlock. While DB writes + * cannot deadlock with other DB writes, they can deadlock with a transaction. + * A negative timeout should only be used if all transactions have a small + * expiration set. + * + * Default: 1000 + * + * @param defaultLockTimeout the timeout in milliseconds when writing a key + * OUTSIDE of a transaction + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setDefaultLockTimeout( + final long defaultLockTimeout) { + assert(isOwningHandle()); + setDefaultLockTimeout(nativeHandle_, defaultLockTimeout); + return this; + } + +// /** +// * If set, the {@link TransactionDB} will use this implementation of a mutex +// * and condition variable for all transaction locking instead of the default +// * mutex/condvar implementation. +// * +// * @param transactionDbMutexFactory the mutex factory for the transactions +// * +// * @return this TransactionDBOptions instance +// */ +// public TransactionDBOptions setCustomMutexFactory( +// final TransactionDBMutexFactory transactionDbMutexFactory) { +// +// } + + /** + * The policy for when to write the data into the DB. The default policy is to + * write only the committed data {@link TxnDBWritePolicy#WRITE_COMMITTED}. + * The data could be written before the commit phase. The DB then needs to + * provide the mechanisms to tell apart committed from uncommitted data. + * + * @return The write policy. + */ + public TxnDBWritePolicy getWritePolicy() { + assert(isOwningHandle()); + return TxnDBWritePolicy.getTxnDBWritePolicy(getWritePolicy(nativeHandle_)); + } + + /** + * The policy for when to write the data into the DB. The default policy is to + * write only the committed data {@link TxnDBWritePolicy#WRITE_COMMITTED}. + * The data could be written before the commit phase. The DB then needs to + * provide the mechanisms to tell apart committed from uncommitted data. + * + * @param writePolicy The write policy. 
+ * + * @return this TransactionDBOptions instance + */ + public TransactionDBOptions setWritePolicy( + final TxnDBWritePolicy writePolicy) { + assert(isOwningHandle()); + setWritePolicy(nativeHandle_, writePolicy.getValue()); + return this; + } + + private native static long newTransactionDBOptions(); + private native long getMaxNumLocks(final long handle); + private native void setMaxNumLocks(final long handle, + final long maxNumLocks); + private native long getNumStripes(final long handle); + private native void setNumStripes(final long handle, final long numStripes); + private native long getTransactionLockTimeout(final long handle); + private native void setTransactionLockTimeout(final long handle, + final long transactionLockTimeout); + private native long getDefaultLockTimeout(final long handle); + private native void setDefaultLockTimeout(final long handle, + final long transactionLockTimeout); + private native byte getWritePolicy(final long handle); + private native void setWritePolicy(final long handle, final byte writePolicy); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java new file mode 100644 index 000000000..5d9ec58d7 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java @@ -0,0 +1,112 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +/** + *

<p>A TransactionLogIterator is used to iterate over the transactions in a db. + * One run of the iterator is continuous, i.e. the iterator will stop at the + * beginning of any gap in sequences.</p>
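+ *
+ * <p>A hedged usage sketch; it assumes the iterator was obtained via
+ * {@link RocksDB#getUpdatesSince(long)} and that {@code db} exists:</p>
+ * <pre>{@code
+ * try (TransactionLogIterator it = db.getUpdatesSince(0)) {
+ *   while (it.isValid()) {
+ *     it.status(); // throws if something went wrong
+ *     TransactionLogIterator.BatchResult batch = it.getBatch();
+ *     // batch.sequenceNumber() and batch.writeBatch() describe the entry
+ *     it.next();
+ *   }
+ * }
+ * }</pre>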

+ */ +public class TransactionLogIterator extends RocksObject { + + /** + *

<p>An iterator is either positioned at a WriteBatch + * or not valid. This method returns true if the iterator + * is valid. Can read data from a valid iterator.</p>

+ * + * @return true if iterator position is valid. + */ + public boolean isValid() { + return isValid(nativeHandle_); + } + + /** + *

<p>Moves the iterator to the next WriteBatch. + * REQUIRES: Valid() to be true.</p>

+ */ + public void next() { + next(nativeHandle_); + } + + /** + *

<p>Throws RocksDBException if something went wrong.</p>

+ * + * @throws org.rocksdb.RocksDBException if something went + * wrong in the underlying C++ code. + */ + public void status() throws RocksDBException { + status(nativeHandle_); + } + + /** + *

<p>If iterator position is valid, return the current + * write_batch and the sequence number of the earliest + * transaction contained in the batch.</p>

+ * + *

<p>ONLY use if Valid() is true and status() is OK.</p>

+ * + * @return {@link org.rocksdb.TransactionLogIterator.BatchResult} + * instance. + */ + public BatchResult getBatch() { + assert(isValid()); + return getBatch(nativeHandle_); + } + + /** + *

<p>TransactionLogIterator constructor.</p>

+ * + * @param nativeHandle address to native address. + */ + TransactionLogIterator(final long nativeHandle) { + super(nativeHandle); + } + + /** + *

<p>BatchResult represents a data structure returned + * by a TransactionLogIterator containing a sequence + * number and a {@link WriteBatch} instance.</p>

+ */ + public static final class BatchResult { + /** + *

<p>Constructor of BatchResult class.</p>

+ * + * @param sequenceNumber related to this BatchResult instance. + * @param nativeHandle to {@link org.rocksdb.WriteBatch} + * native instance. + */ + public BatchResult(final long sequenceNumber, + final long nativeHandle) { + sequenceNumber_ = sequenceNumber; + writeBatch_ = new WriteBatch(nativeHandle, true); + } + + /** + *

<p>Return sequence number related to this BatchResult.</p>

+ * + * @return Sequence number. + */ + public long sequenceNumber() { + return sequenceNumber_; + } + + /** + *

<p>Return contained {@link org.rocksdb.WriteBatch} + * instance.</p>

+ * + * @return {@link org.rocksdb.WriteBatch} instance. + */ + public WriteBatch writeBatch() { + return writeBatch_; + } + + private final long sequenceNumber_; + private final WriteBatch writeBatch_; + } + + @Override protected final native void disposeInternal(final long handle); + private native boolean isValid(long handle); + private native void next(long handle); + private native void status(long handle) + throws RocksDBException; + private native BatchResult getBatch(long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java new file mode 100644 index 000000000..195fc85e4 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionOptions.java @@ -0,0 +1,189 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class TransactionOptions extends RocksObject + implements TransactionalOptions { + + public TransactionOptions() { + super(newTransactionOptions()); + } + + @Override + public boolean isSetSnapshot() { + assert(isOwningHandle()); + return isSetSnapshot(nativeHandle_); + } + + @Override + public TransactionOptions setSetSnapshot(final boolean setSnapshot) { + assert(isOwningHandle()); + setSetSnapshot(nativeHandle_, setSnapshot); + return this; + } + + /** + * True means that before acquiring locks, this transaction will + * check if doing so will cause a deadlock. If so, it will return with + * {@link Status.Code#Busy}. The user should retry their transaction. + * + * @return true if a deadlock is detected. + */ + public boolean isDeadlockDetect() { + assert(isOwningHandle()); + return isDeadlockDetect(nativeHandle_); + } + + /** + * Setting to true means that before acquiring locks, this transaction will + * check if doing so will cause a deadlock. If so, it will return with + * {@link Status.Code#Busy}. The user should retry their transaction. + * + * @param deadlockDetect true if we should detect deadlocks. + * + * @return this TransactionOptions instance + */ + public TransactionOptions setDeadlockDetect(final boolean deadlockDetect) { + assert(isOwningHandle()); + setDeadlockDetect(nativeHandle_, deadlockDetect); + return this; + } + + /** + * The wait timeout in milliseconds when a transaction attempts to lock a key. + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * will be used + * + * @return the lock timeout in milliseconds + */ + public long getLockTimeout() { + assert(isOwningHandle()); + return getLockTimeout(nativeHandle_); + } + + /** + * If positive, specifies the wait timeout in milliseconds when + * a transaction attempts to lock a key. + * + * If 0, no waiting is done if a lock cannot instantly be acquired. + * If negative, {@link TransactionDBOptions#getTransactionLockTimeout(long)} + * will be used + * + * Default: -1 + * + * @param lockTimeout the lock timeout in milliseconds + * + * @return this TransactionOptions instance + */ + public TransactionOptions setLockTimeout(final long lockTimeout) { + assert(isOwningHandle()); + setLockTimeout(nativeHandle_, lockTimeout); + return this; + } + + /** + * Expiration duration in milliseconds. 
+ * + * If non-negative, transactions that last longer than this many milliseconds + * will fail to commit. If not set, a forgotten transaction that is never + * committed, rolled back, or deleted will never relinquish any locks it + * holds. This could prevent keys from being written by other writers. + * + * @return expiration the expiration duration in milliseconds + */ + public long getExpiration() { + assert(isOwningHandle()); + return getExpiration(nativeHandle_); + } + + /** + * Expiration duration in milliseconds. + * + * If non-negative, transactions that last longer than this many milliseconds + * will fail to commit. If not set, a forgotten transaction that is never + * committed, rolled back, or deleted will never relinquish any locks it + * holds. This could prevent keys from being written by other writers. + * + * Default: -1 + * + * @param expiration the expiration duration in milliseconds + * + * @return this TransactionOptions instance + */ + public TransactionOptions setExpiration(final long expiration) { + assert(isOwningHandle()); + setExpiration(nativeHandle_, expiration); + return this; + } + + /** + * Gets the number of traversals to make during deadlock detection. + * + * @return the number of traversals to make during + * deadlock detection + */ + public long getDeadlockDetectDepth() { + return getDeadlockDetectDepth(nativeHandle_); + } + + /** + * Sets the number of traversals to make during deadlock detection. + * + * Default: 50 + * + * @param deadlockDetectDepth the number of traversals to make during + * deadlock detection + * + * @return this TransactionOptions instance + */ + public TransactionOptions setDeadlockDetectDepth( + final long deadlockDetectDepth) { + setDeadlockDetectDepth(nativeHandle_, deadlockDetectDepth); + return this; + } + + /** + * Get the maximum number of bytes that may be used for the write batch. + * + * @return the maximum number of bytes, 0 means no limit. + */ + public long getMaxWriteBatchSize() { + return getMaxWriteBatchSize(nativeHandle_); + } + + /** + * Set the maximum number of bytes that may be used for the write batch. + * + * @param maxWriteBatchSize the maximum number of bytes, 0 means no limit. 
+   *
+   * @return this TransactionOptions instance
+   */
+  public TransactionOptions setMaxWriteBatchSize(final long maxWriteBatchSize) {
+    setMaxWriteBatchSize(nativeHandle_, maxWriteBatchSize);
+    return this;
+  }
+
+  private native static long newTransactionOptions();
+  private native boolean isSetSnapshot(final long handle);
+  private native void setSetSnapshot(final long handle,
+      final boolean setSnapshot);
+  private native boolean isDeadlockDetect(final long handle);
+  private native void setDeadlockDetect(final long handle,
+      final boolean deadlockDetect);
+  private native long getLockTimeout(final long handle);
+  private native void setLockTimeout(final long handle, final long lockTimeout);
+  private native long getExpiration(final long handle);
+  private native void setExpiration(final long handle, final long expiration);
+  private native long getDeadlockDetectDepth(final long handle);
+  private native void setDeadlockDetectDepth(final long handle,
+      final long deadlockDetectDepth);
+  private native long getMaxWriteBatchSize(final long handle);
+  private native void setMaxWriteBatchSize(final long handle,
+      final long maxWriteBatchSize);
+  @Override protected final native void disposeInternal(final long handle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java
new file mode 100644
index 000000000..740181989
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalDB.java
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+interface TransactionalDB<T extends TransactionalOptions<T>> extends AutoCloseable {
+  /**
+   * Starts a new Transaction.
+   *
+   * Caller is responsible for calling {@link #close()} on the returned
+   * transaction when it is no longer needed.
+   *
+   * @param writeOptions Any write options for the transaction
+   * @return a new transaction
+   */
+  Transaction beginTransaction(final WriteOptions writeOptions);
+
+  /**
+   * Starts a new Transaction.
+   *
+   * Caller is responsible for calling {@link #close()} on the returned
+   * transaction when it is no longer needed.
+   *
+   * @param writeOptions Any write options for the transaction
+   * @param transactionOptions Any options for the transaction
+   * @return a new transaction
+   */
+  Transaction beginTransaction(final WriteOptions writeOptions,
+      final T transactionOptions);
+
+  /**
+   * Starts a new Transaction.
+   *
+   * Caller is responsible for calling {@link #close()} on the returned
+   * transaction when it is no longer needed.
+   *
+   * @param writeOptions Any write options for the transaction
+   * @param oldTransaction this Transaction will be reused instead of allocating
+   *     a new one. This is an optimization to avoid extra allocations
+   *     when repeatedly creating transactions.
+   * @return The oldTransaction which has been reinitialized as a new
+   *     transaction
+   */
+  Transaction beginTransaction(final WriteOptions writeOptions,
+      final Transaction oldTransaction);
+
+  /**
+   * Starts a new Transaction.
+   *
+   * Caller is responsible for calling {@link #close()} on the returned
+   * transaction when it is no longer needed.
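+   *
+   * <p>A hedged sketch of the reuse pattern; {@code txnDb}, {@code writeOpts}
+   * and {@code txnOpts} are assumed to exist:</p>
+   * <pre>{@code
+   * Transaction txn = txnDb.beginTransaction(writeOpts);
+   * // ... work, then commit() or rollback() ...
+   * // recycle the same object instead of allocating a new one:
+   * txn = txnDb.beginTransaction(writeOpts, txnOpts, txn);
+   * }</pre>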
+   *
+   * @param writeOptions Any write options for the transaction
+   * @param transactionOptions Any options for the transaction
+   * @param oldTransaction this Transaction will be reused instead of allocating
+   *     a new one. This is an optimization to avoid extra allocations
+   *     when repeatedly creating transactions.
+   * @return The oldTransaction which has been reinitialized as a new
+   *     transaction
+   */
+  Transaction beginTransaction(final WriteOptions writeOptions,
+      final T transactionOptions, final Transaction oldTransaction);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java
new file mode 100644
index 000000000..d55ee900c
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TransactionalOptions.java
@@ -0,0 +1,31 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+
+interface TransactionalOptions<T extends TransactionalOptions<T>>
+    extends AutoCloseable {
+
+  /**
+   * True indicates snapshots will be set, just like if
+   * {@link Transaction#setSnapshot()} had been called
+   *
+   * @return whether a snapshot will be set
+   */
+  boolean isSetSnapshot();
+
+  /**
+   * Setting the setSnapshot to true is the same as calling
+   * {@link Transaction#setSnapshot()}.
+   *
+   * Default: false
+   *
+   * @param setSnapshot Whether to set a snapshot
+   *
+   * @return this TransactionalOptions instance
+   */
+  T setSetSnapshot(final boolean setSnapshot);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java b/src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java
new file mode 100644
index 000000000..a7adaf4b2
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java
@@ -0,0 +1,245 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.List;
+
+/**
+ * Database with TTL support.
+ *

+ * <p><strong>Use case</strong></p>
+ * <p>This API should be used to open the db when key-values inserted are
+ * meant to be removed from the db in a non-strict 'ttl' amount of time
+ * Therefore, this guarantees that key-values inserted will remain in the
+ * db for >= ttl amount of time and the db will make efforts to remove the
+ * key-values as soon as possible after ttl seconds of their insertion.
+ * </p>
+ *
+ * <p><strong>Behaviour</strong></p>
+ * <p>TTL is accepted in seconds
+ * (int32_t)Timestamp(creation) is suffixed to values in Put internally
+ * Expired TTL values deleted in compaction only:(Timestamp+ttl<time_now)
+ * Get/Iterator may return expired entries(compaction not run on them yet)
+ * Different TTL may be used during different Opens
+ * </p>
+ *
+ * <p><strong>Example</strong></p>
+ * <ul>
+ * <li>Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2</li>
+ * <li>Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5</li>
+ * </ul>
+ *
+ * <p>
+ * read_only=true opens in the usual read-only mode. Compactions will not be
+ * triggered(neither manual nor automatic), so no expired entries removed
+ * </p>
+ *
+ * <p><strong>Constraints</strong></p>
+ * <p>Not specifying/passing or non-positive TTL behaves
+ * like TTL = infinity</p>
+ *
+ * <p><strong>!!!WARNING!!!</strong></p>
+ * <p>Calling DB::Open directly to re-open a db created by this API will get
+ * corrupt values(timestamp suffixed) and no ttl effect will be there
+ * during the second Open, so use this API consistently to open the db
+ * Be careful when passing ttl with a small positive value because the
+ * whole database may be deleted in a small amount of time.</p>
+ */ +public class TtlDB extends RocksDB { + + /** + *

<p>Opens a TtlDB.</p> + * + * <p>Database is opened in read-write mode without default TTL.</p>
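+   *
+   * <p>A minimal usage sketch; the path is a placeholder and error handling
+   * is omitted:</p>
+   * <pre>{@code
+   * try (Options options = new Options().setCreateIfMissing(true);
+   *      TtlDB db = TtlDB.open(options, "/tmp/ttl_db")) {
+   *   db.put("key".getBytes(), "value".getBytes());
+   * }
+   * }</pre>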

+ * + * @param options {@link org.rocksdb.Options} instance. + * @param db_path path to database. + * + * @return TtlDB instance. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public static TtlDB open(final Options options, final String db_path) + throws RocksDBException { + return open(options, db_path, 0, false); + } + + /** + *

<p>Opens a TtlDB.</p>

+   *
+   * @param options {@link org.rocksdb.Options} instance.
+   * @param db_path path to database.
+   * @param ttl time to live for new entries.
+   * @param readOnly boolean value indicating if the db is
+   *     opened read-only.
+   *
+   * @return TtlDB instance.
+   *
+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.
+   */
+  public static TtlDB open(final Options options, final String db_path,
+      final int ttl, final boolean readOnly) throws RocksDBException {
+    return new TtlDB(open(options.nativeHandle_, db_path, ttl, readOnly));
+  }
+
+  /**
+   * <p>Opens a TtlDB.</p>
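+   *
+   * <p>A hedged sketch of opening with per-column-family TTLs; names, path
+   * and TTL values are placeholders:</p>
+   * <pre>{@code
+   * List<ColumnFamilyDescriptor> cfds = Arrays.asList(
+   *     new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+   *     new ColumnFamilyDescriptor("expiring".getBytes()));
+   * List<ColumnFamilyHandle> handles = new ArrayList<>();
+   * try (DBOptions options = new DBOptions().setCreateIfMissing(true);
+   *      TtlDB db = TtlDB.open(options, "/tmp/ttl_db", cfds, handles,
+   *          Arrays.asList(0, 3600), false)) {
+   *   // handles now contains one ColumnFamilyHandle per descriptor
+   * }
+   * }</pre>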

+   *
+   * @param options {@link org.rocksdb.DBOptions} instance.
+   * @param db_path path to database.
+   * @param columnFamilyDescriptors list of column family descriptors
+   * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances
+   *     on open.
+   * @param ttlValues time to live values per column family handle
+   * @param readOnly boolean value indicating if the db is
+   *     opened read-only.
+   *
+   * @return TtlDB instance.
+   *
+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.
+   * @throws java.lang.IllegalArgumentException when there is not a ttl value
+   *     per given column family handle.
+   */
+  public static TtlDB open(final DBOptions options, final String db_path,
+      final List<ColumnFamilyDescriptor> columnFamilyDescriptors,
+      final List<ColumnFamilyHandle> columnFamilyHandles,
+      final List<Integer> ttlValues, final boolean readOnly)
+      throws RocksDBException {
+    if (columnFamilyDescriptors.size() != ttlValues.size()) {
+      throw new IllegalArgumentException("There must be a ttl value per column"
+          + " family handle.");
+    }
+
+    final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][];
+    final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()];
+    for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
+      final ColumnFamilyDescriptor cfDescriptor =
+          columnFamilyDescriptors.get(i);
+      cfNames[i] = cfDescriptor.getName();
+      cfOptionHandles[i] = cfDescriptor.getOptions().nativeHandle_;
+    }
+
+    final int[] ttlVals = new int[ttlValues.size()];
+    for (int i = 0; i < ttlValues.size(); i++) {
+      ttlVals[i] = ttlValues.get(i);
+    }
+    final long[] handles = openCF(options.nativeHandle_, db_path,
+        cfNames, cfOptionHandles, ttlVals, readOnly);
+
+    final TtlDB ttlDB = new TtlDB(handles[0]);
+    for (int i = 1; i < handles.length; i++) {
+      columnFamilyHandles.add(new ColumnFamilyHandle(ttlDB, handles[i]));
+    }
+    return ttlDB;
+  }
+
+  /**
+   * <p>Close the TtlDB instance and release resource.</p>

+ * + * This is similar to {@link #close()} except that it + * throws an exception if any error occurs. + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + * + * @throws RocksDBException if an error occurs whilst closing. + */ + public void closeE() throws RocksDBException { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } finally { + disposeInternal(); + } + } + } + + /** + *

<p>Close the TtlDB instance and release resource.</p>
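+   *
+   * <p>A hedged sketch of syncing the WAL before closing, via an empty,
+   * sync-enabled write batch as described below:</p>
+   * <pre>{@code
+   * try (WriteOptions syncOptions = new WriteOptions().setSync(true);
+   *      WriteBatch emptyBatch = new WriteBatch()) {
+   *   db.write(syncOptions, emptyBatch);
+   * }
+   * db.close();
+   * }</pre>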

+ * + * + * This will not fsync the WAL files. + * If syncing is required, the caller must first call {@link #syncWal()} + * or {@link #write(WriteOptions, WriteBatch)} using an empty write batch + * with {@link WriteOptions#setSync(boolean)} set to true. + * + * See also {@link #close()}. + */ + @Override + public void close() { + if (owningHandle_.compareAndSet(true, false)) { + try { + closeDatabase(nativeHandle_); + } catch (final RocksDBException e) { + // silently ignore the error report + } finally { + disposeInternal(); + } + } + } + + /** + *

<p>Creates a new ttl based column family with a name defined + * in given ColumnFamilyDescriptor and allocates a + * ColumnFamilyHandle within an internal structure.</p> + * + * <p>The ColumnFamilyHandle is automatically disposed with DB + * disposal.</p>
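+   *
+   * <p>For instance (a sketch; the family name and TTL are placeholders):</p>
+   * <pre>{@code
+   * ColumnFamilyHandle cf = ttlDb.createColumnFamilyWithTtl(
+   *     new ColumnFamilyDescriptor("sessions".getBytes()), 3600);
+   * }</pre>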

+ * + * @param columnFamilyDescriptor column family to be created. + * @param ttl TTL to set for this column family. + * + * @return {@link org.rocksdb.ColumnFamilyHandle} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public ColumnFamilyHandle createColumnFamilyWithTtl( + final ColumnFamilyDescriptor columnFamilyDescriptor, + final int ttl) throws RocksDBException { + return new ColumnFamilyHandle(this, + createColumnFamilyWithTtl(nativeHandle_, + columnFamilyDescriptor.getName(), + columnFamilyDescriptor.getOptions().nativeHandle_, ttl)); + } + + /** + *

<p>A protected constructor that will be used in the static + * factory method + * {@link #open(Options, String, int, boolean)} + * and + * {@link #open(DBOptions, String, java.util.List, java.util.List, + * java.util.List, boolean)}. + * </p>

+ * + * @param nativeHandle The native handle of the C++ TtlDB object + */ + protected TtlDB(final long nativeHandle) { + super(nativeHandle); + } + + @Override protected native void disposeInternal(final long handle); + + private native static long open(final long optionsHandle, + final String db_path, final int ttl, final boolean readOnly) + throws RocksDBException; + private native static long[] openCF(final long optionsHandle, + final String db_path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions, final int[] ttlValues, + final boolean readOnly) throws RocksDBException; + private native long createColumnFamilyWithTtl(final long handle, + final byte[] columnFamilyName, final long columnFamilyOptions, int ttl) + throws RocksDBException; + private native static void closeDatabase(final long handle) + throws RocksDBException; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java b/src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java new file mode 100644 index 000000000..837ce6157 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/TxnDBWritePolicy.java @@ -0,0 +1,62 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +/** + * The transaction db write policy. + */ +public enum TxnDBWritePolicy { + /** + * Write only the committed data. + */ + WRITE_COMMITTED((byte)0x00), + + /** + * Write data after the prepare phase of 2pc. + */ + WRITE_PREPARED((byte)0x1), + + /** + * Write data before the prepare phase of 2pc. + */ + WRITE_UNPREPARED((byte)0x2); + + private byte value; + + TxnDBWritePolicy(final byte value) { + this.value = value; + } + + /** + *

<p>Returns the byte value of the enumeration value.</p>

+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + *

<p>Get the TxnDBWritePolicy enumeration value by + * passing the byte identifier to this method.</p>
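+   *
+   * <p>For example (round-tripping the byte representation):</p>
+   * <pre>{@code
+   * byte raw = TxnDBWritePolicy.WRITE_COMMITTED.getValue();
+   * TxnDBWritePolicy policy = TxnDBWritePolicy.getTxnDBWritePolicy(raw);
+   * }</pre>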

+ * + * @param byteIdentifier of TxnDBWritePolicy. + * + * @return TxnDBWritePolicy instance. + * + * @throws IllegalArgumentException If TxnDBWritePolicy cannot be found for + * the provided byteIdentifier + */ + public static TxnDBWritePolicy getTxnDBWritePolicy(final byte byteIdentifier) { + for (final TxnDBWritePolicy txnDBWritePolicy : TxnDBWritePolicy.values()) { + if (txnDBWritePolicy.getValue() == byteIdentifier) { + return txnDBWritePolicy; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for TxnDBWritePolicy."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java b/src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java new file mode 100644 index 000000000..cce9b298d --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/UInt64AddOperator.java @@ -0,0 +1,19 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * Uint64AddOperator is a merge operator that accumlates a long + * integer value. + */ +public class UInt64AddOperator extends MergeOperator { + public UInt64AddOperator() { + super(newSharedUInt64AddOperator()); + } + + private native static long newSharedUInt64AddOperator(); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java new file mode 100644 index 000000000..fb1e7a948 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java @@ -0,0 +1,46 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +/** + * The config for vector memtable representation. + */ +public class VectorMemTableConfig extends MemTableConfig { + public static final int DEFAULT_RESERVED_SIZE = 0; + + /** + * VectorMemTableConfig constructor + */ + public VectorMemTableConfig() { + reservedSize_ = DEFAULT_RESERVED_SIZE; + } + + /** + * Set the initial size of the vector that will be used + * by the memtable created based on this config. + * + * @param size the initial size of the vector. + * @return the reference to the current config. + */ + public VectorMemTableConfig setReservedSize(final int size) { + reservedSize_ = size; + return this; + } + + /** + * Returns the initial size of the vector used by the memtable + * created based on this config. + * + * @return the initial size of the vector. + */ + public int reservedSize() { + return reservedSize_; + } + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle(reservedSize_); + } + + private native long newMemTableFactoryHandle(long reservedSize) + throws IllegalArgumentException; + private int reservedSize_; +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java b/src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java new file mode 100644 index 000000000..d8b9eeced --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/WALRecoveryMode.java @@ -0,0 +1,83 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +/** + * The WAL Recover Mode + */ +public enum WALRecoveryMode { + + /** + * Original levelDB recovery + * + * We tolerate incomplete record in trailing data on all logs + * Use case : This is legacy behavior (default) + */ + TolerateCorruptedTailRecords((byte)0x00), + + /** + * Recover from clean shutdown + * + * We don't expect to find any corruption in the WAL + * Use case : This is ideal for unit tests and rare applications that + * can require high consistency guarantee + */ + AbsoluteConsistency((byte)0x01), + + /** + * Recover to point-in-time consistency + * We stop the WAL playback on discovering WAL inconsistency + * Use case : Ideal for systems that have disk controller cache like + * hard disk, SSD without super capacitor that store related data + */ + PointInTimeRecovery((byte)0x02), + + /** + * Recovery after a disaster + * We ignore any corruption in the WAL and try to salvage as much data as + * possible + * Use case : Ideal for last ditch effort to recover data or systems that + * operate with low grade unrelated data + */ + SkipAnyCorruptedRecords((byte)0x03); + + private byte value; + + WALRecoveryMode(final byte value) { + this.value = value; + } + + /** + *

<p>Returns the byte value of the enumeration value.</p>

+ * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + *

<p>Get the WALRecoveryMode enumeration value by + * passing the byte identifier to this method.</p>
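+   *
+   * <p>In practice the mode is usually set on the options used to open the
+   * database, e.g. (a hedged sketch):</p>
+   * <pre>{@code
+   * Options options = new Options()
+   *     .setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery);
+   * }</pre>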

+ * + * @param byteIdentifier of WALRecoveryMode. + * + * @return WALRecoveryMode instance. + * + * @throws IllegalArgumentException If WALRecoveryMode cannot be found for the + * provided byteIdentifier + */ + public static WALRecoveryMode getWALRecoveryMode(final byte byteIdentifier) { + for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) { + if (walRecoveryMode.getValue() == byteIdentifier) { + return walRecoveryMode; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for WALRecoveryMode."); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java new file mode 100644 index 000000000..ce146eb3f --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -0,0 +1,203 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +public class WBWIRocksIterator + extends AbstractRocksIterator { + private final WriteEntry entry = new WriteEntry(); + + protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, + final long nativeHandle) { + super(wbwi, nativeHandle); + } + + /** + * Get the current entry + * + * The WriteEntry is only valid + * until the iterator is repositioned. + * If you want to keep the WriteEntry across iterator + * movements, you must make a copy of its data! + * + * Note - This method is not thread-safe with respect to the WriteEntry + * as it performs a non-atomic update across the fields of the WriteEntry + * + * @return The WriteEntry of the current entry + */ + public WriteEntry entry() { + assert(isOwningHandle()); + final long[] ptrs = entry1(nativeHandle_); + + entry.type = WriteType.fromId((byte)ptrs[0]); + entry.key.resetNativeHandle(ptrs[1], ptrs[1] != 0); + entry.value.resetNativeHandle(ptrs[2], ptrs[2] != 0); + + return entry; + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native boolean isValid0(long handle); + @Override final native void seekToFirst0(long handle); + @Override final native void seekToLast0(long handle); + @Override final native void next0(long handle); + @Override final native void prev0(long handle); + @Override final native void refresh0(final long handle) throws RocksDBException; + @Override final native void seek0(long handle, byte[] target, int targetLen); + @Override final native void seekForPrev0(long handle, byte[] target, int targetLen); + @Override final native void status0(long handle) throws RocksDBException; + @Override + final native void seekDirect0( + final long handle, final ByteBuffer target, final int targetOffset, final int targetLen); + @Override + final native void seekForPrevDirect0( + final long handle, final ByteBuffer target, final int targetOffset, final int targetLen); + @Override + final native void seekByteArray0( + final long handle, final byte[] target, final int targetOffset, final int targetLen); + @Override + final native void seekForPrevByteArray0( + final long handle, final byte[] target, final int targetOffset, final int targetLen); + + private native long[] entry1(final long handle); + + /** + * Enumeration of the Write operation + * that created the record in the Write Batch + */ + public enum WriteType { + PUT((byte)0x0), + 
MERGE((byte)0x1), + DELETE((byte)0x2), + SINGLE_DELETE((byte)0x3), + DELETE_RANGE((byte)0x4), + LOG((byte)0x5), + XID((byte)0x6); + + final byte id; + WriteType(final byte id) { + this.id = id; + } + + public static WriteType fromId(final byte id) { + for(final WriteType wt : WriteType.values()) { + if(id == wt.id) { + return wt; + } + } + throw new IllegalArgumentException("No WriteType with id=" + id); + } + } + + @Override + public void close() { + entry.close(); + super.close(); + } + + /** + * Represents an entry returned by + * {@link org.rocksdb.WBWIRocksIterator#entry()} + * + * It is worth noting that a WriteEntry with + * the type {@link org.rocksdb.WBWIRocksIterator.WriteType#DELETE} + * or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG} + * will not have a value. + */ + public static class WriteEntry implements AutoCloseable { + WriteType type = null; + final DirectSlice key; + final DirectSlice value; + + /** + * Intentionally private as this + * should only be instantiated in + * this manner by the outer WBWIRocksIterator + * class; The class members are then modified + * by calling {@link org.rocksdb.WBWIRocksIterator#entry()} + */ + private WriteEntry() { + key = new DirectSlice(); + value = new DirectSlice(); + } + + public WriteEntry(final WriteType type, final DirectSlice key, + final DirectSlice value) { + this.type = type; + this.key = key; + this.value = value; + } + + /** + * Returns the type of the Write Entry + * + * @return the WriteType of the WriteEntry + */ + public WriteType getType() { + return type; + } + + /** + * Returns the key of the Write Entry + * + * @return The slice containing the key + * of the WriteEntry + */ + public DirectSlice getKey() { + return key; + } + + /** + * Returns the value of the Write Entry + * + * @return The slice containing the value of + * the WriteEntry or null if the WriteEntry has + * no value + */ + public DirectSlice getValue() { + if(!value.isOwningHandle()) { + return null; //TODO(AR) migrate to JDK8 java.util.Optional#empty() + } else { + return value; + } + } + + /** + * Generates a hash code for the Write Entry. NOTE: The hash code is based + * on the string representation of the key, so it may not work correctly + * with exotic custom comparators. + * + * @return The hash code for the Write Entry + */ + @Override + public int hashCode() { + return (key == null) ? 0 : key.hashCode(); + } + + @Override + public boolean equals(final Object other) { + if(other == null) { + return false; + } else if (this == other) { + return true; + } else if(other instanceof WriteEntry) { + final WriteEntry otherWriteEntry = (WriteEntry)other; + return type.equals(otherWriteEntry.type) + && key.equals(otherWriteEntry.key) + && value.equals(otherWriteEntry.value); + } else { + return false; + } + } + + @Override + public void close() { + value.close(); + key.close(); + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java b/src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java new file mode 100644 index 000000000..fed27ed11 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/WalFileType.java @@ -0,0 +1,55 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum WalFileType { + /** + * Indicates that WAL file is in archive directory. 
WAL files are moved from + * the main db directory to archive directory once they are not live and stay + * there until cleaned up. Files are cleaned depending on archive size + * (Options::WAL_size_limit_MB) and time since last cleaning + * (Options::WAL_ttl_seconds). + */ + kArchivedLogFile((byte)0x0), + + /** + * Indicates that WAL file is live and resides in the main db directory + */ + kAliveLogFile((byte)0x1); + + private final byte value; + + WalFileType(final byte value) { + this.value = value; + } + + /** + * Get the internal representation value. + * + * @return the internal representation value + */ + byte getValue() { + return value; + } + + /** + * Get the WalFileType from the internal representation value. + * + * @return the wal file type. + * + * @throws IllegalArgumentException if the value is unknown. + */ + static WalFileType fromValue(final byte value) { + for (final WalFileType walFileType : WalFileType.values()) { + if(walFileType.value == value) { + return walFileType; + } + } + + throw new IllegalArgumentException( + "Illegal value provided for WalFileType: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java b/src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java new file mode 100644 index 000000000..37e36213a --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/WalFilter.java @@ -0,0 +1,87 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Map; + +/** + * WALFilter allows an application to inspect write-ahead-log (WAL) + * records or modify their processing on recovery. + */ +public interface WalFilter { + + /** + * Provide ColumnFamily->LogNumber map to filter + * so that filter can determine whether a log number applies to a given + * column family (i.e. that log hasn't been flushed to SST already for the + * column family). + * + * We also pass in name>id map as only name is known during + * recovery (as handles are opened post-recovery). + * while write batch callbacks happen in terms of column family id. + * + * @param cfLognumber column_family_id to lognumber map + * @param cfNameId column_family_name to column_family_id map + */ + void columnFamilyLogNumberMap(final Map cfLognumber, + final Map cfNameId); + + /** + * LogRecord is invoked for each log record encountered for all the logs + * during replay on logs on recovery. This method can be used to: + * * inspect the record (using the batch parameter) + * * ignoring current record + * (by returning WalProcessingOption::kIgnoreCurrentRecord) + * * reporting corrupted record + * (by returning WalProcessingOption::kCorruptedRecord) + * * stop log replay + * (by returning kStop replay) - please note that this implies + * discarding the logs from current record onwards. + * + * @param logNumber log number of the current log. + * Filter might use this to determine if the log + * record is applicable to a certain column family. + * @param logFileName log file name - only for informational purposes + * @param batch batch encountered in the log during recovery + * @param newBatch new batch to populate if filter wants to change + * the batch (for example to filter some records out, or alter some + * records). 
Please note that the new batch MUST NOT contain + * more records than original, else recovery would be failed. + * + * @return Processing option for the current record. + */ + LogRecordFoundResult logRecordFound(final long logNumber, + final String logFileName, final WriteBatch batch, + final WriteBatch newBatch); + + class LogRecordFoundResult { + public static LogRecordFoundResult CONTINUE_UNCHANGED = + new LogRecordFoundResult(WalProcessingOption.CONTINUE_PROCESSING, false); + + final WalProcessingOption walProcessingOption; + final boolean batchChanged; + + /** + * @param walProcessingOption the processing option + * @param batchChanged Whether batch was changed by the filter. + * It must be set to true if newBatch was populated, + * else newBatch has no effect. + */ + public LogRecordFoundResult(final WalProcessingOption walProcessingOption, + final boolean batchChanged) { + this.walProcessingOption = walProcessingOption; + this.batchChanged = batchChanged; + } + } + + /** + * Returns a name that identifies this WAL filter. + * The name will be printed to LOG file on start up for diagnosis. + * + * @return the name + */ + String name(); +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java b/src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java new file mode 100644 index 000000000..889602edc --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/WalProcessingOption.java @@ -0,0 +1,54 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public enum WalProcessingOption { + /** + * Continue processing as usual. + */ + CONTINUE_PROCESSING((byte)0x0), + + /** + * Ignore the current record but continue processing of log(s). + */ + IGNORE_CURRENT_RECORD((byte)0x1), + + /** + * Stop replay of logs and discard logs. + * Logs won't be replayed on subsequent recovery. + */ + STOP_REPLAY((byte)0x2), + + /** + * Corrupted record detected by filter. + */ + CORRUPTED_RECORD((byte)0x3); + + private final byte value; + + WalProcessingOption(final byte value) { + this.value = value; + } + + /** + * Get the internal representation. + * + * @return the internal representation. + */ + byte getValue() { + return value; + } + + public static WalProcessingOption fromValue(final byte value) { + for (final WalProcessingOption walProcessingOption : WalProcessingOption.values()) { + if (walProcessingOption.value == value) { + return walProcessingOption; + } + } + throw new IllegalArgumentException( + "Illegal value provided for WalProcessingOption: " + value); + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java new file mode 100644 index 000000000..9b46108d0 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java @@ -0,0 +1,396 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + * WriteBatch holds a collection of updates to apply atomically to a DB. + * + * The updates are applied in the order in which they are added + * to the WriteBatch. 
For example, the value of "key" will be "v3" + * after the following batch is written: + * + * batch.put("key", "v1"); + * batch.remove("key"); + * batch.put("key", "v2"); + * batch.put("key", "v3"); + * + * Multiple threads can invoke const methods on a WriteBatch without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same WriteBatch must use + * external synchronization. + */ +public class WriteBatch extends AbstractWriteBatch { + /** + * Constructs a WriteBatch instance. + */ + public WriteBatch() { + this(0); + } + + /** + * Constructs a WriteBatch instance with a given size. + * + * @param reserved_bytes reserved size for WriteBatch + */ + public WriteBatch(final int reserved_bytes) { + super(newWriteBatch(reserved_bytes)); + } + + /** + * Constructs a WriteBatch instance from a serialized representation + * as returned by {@link #data()}. + * + * @param serialized the serialized representation. + */ + public WriteBatch(final byte[] serialized) { + super(newWriteBatch(serialized, serialized.length)); + } + + /** + * Support for iterating over the contents of a batch. + * + * @param handler A handler that is called back for each + * update present in the batch + * + * @throws RocksDBException If we cannot iterate over the batch + */ + public void iterate(final Handler handler) throws RocksDBException { + iterate(nativeHandle_, handler.nativeHandle_); + } + + /** + * Retrieve the serialized version of this batch. + * + * @return the serialized representation of this write batch. + * + * @throws RocksDBException if an error occurs whilst retrieving + * the serialized batch data. + */ + public byte[] data() throws RocksDBException { + return data(nativeHandle_); + } + + /** + * Retrieve data size of the batch. + * + * @return the serialized data size of the batch. + */ + public long getDataSize() { + return getDataSize(nativeHandle_); + } + + /** + * Returns true if Put will be called during Iterate. + * + * @return true if Put will be called during Iterate. + */ + public boolean hasPut() { + return hasPut(nativeHandle_); + } + + /** + * Returns true if Delete will be called during Iterate. + * + * @return true if Delete will be called during Iterate. + */ + public boolean hasDelete() { + return hasDelete(nativeHandle_); + } + + /** + * Returns true if SingleDelete will be called during Iterate. + * + * @return true if SingleDelete will be called during Iterate. + */ + public boolean hasSingleDelete() { + return hasSingleDelete(nativeHandle_); + } + + /** + * Returns true if DeleteRange will be called during Iterate. + * + * @return true if DeleteRange will be called during Iterate. + */ + public boolean hasDeleteRange() { + return hasDeleteRange(nativeHandle_); + } + + /** + * Returns true if Merge will be called during Iterate. + * + * @return true if Merge will be called during Iterate. + */ + public boolean hasMerge() { + return hasMerge(nativeHandle_); + } + + /** + * Returns true if MarkBeginPrepare will be called during Iterate. + * + * @return true if MarkBeginPrepare will be called during Iterate. + */ + public boolean hasBeginPrepare() { + return hasBeginPrepare(nativeHandle_); + } + + /** + * Returns true if MarkEndPrepare will be called during Iterate. + * + * @return true if MarkEndPrepare will be called during Iterate. + */ + public boolean hasEndPrepare() { + return hasEndPrepare(nativeHandle_); + } + + /** + * Returns true if MarkCommit will be called during Iterate. 
+ * + * @return true if MarkCommit will be called during Iterate. + */ + public boolean hasCommit() { + return hasCommit(nativeHandle_); + } + + /** + * Returns true if MarkRollback will be called during Iterate. + * + * @return true if MarkRollback will be called during Iterate. + */ + public boolean hasRollback() { + return hasRollback(nativeHandle_); + } + + @Override + public WriteBatch getWriteBatch() { + return this; + } + + /** + * Marks this point in the WriteBatch as the last record to + * be inserted into the WAL, provided the WAL is enabled. + */ + public void markWalTerminationPoint() { + markWalTerminationPoint(nativeHandle_); + } + + /** + * Gets the WAL termination point. + * + * See {@link #markWalTerminationPoint()} + * + * @return the WAL termination point + */ + public SavePoint getWalTerminationPoint() { + return getWalTerminationPoint(nativeHandle_); + } + + @Override + WriteBatch getWriteBatch(final long handle) { + return this; + } + + /** + *

<p>Private WriteBatch constructor which is used to construct + * WriteBatch instances from C++ side. As the reference to this + * object is also managed from C++ side the handle will be disowned.</p>

+ * + * @param nativeHandle address of native instance. + */ + WriteBatch(final long nativeHandle) { + this(nativeHandle, false); + } + + /** + *

<p>Private WriteBatch constructor which is used to construct + * WriteBatch instances.</p>

+ * + * @param nativeHandle address of native instance. + * @param owningNativeHandle whether to own this reference from the C++ side or not + */ + WriteBatch(final long nativeHandle, final boolean owningNativeHandle) { + super(nativeHandle); + if(!owningNativeHandle) + disOwnNativeHandle(); + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native int count0(final long handle); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override + final native void putDirect(final long handle, final ByteBuffer key, final int keyOffset, + final int keyLength, final ByteBuffer value, final int valueOffset, final int valueLength, + final long cfHandle); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void delete(final long handle, final byte[] key, + final int keyLen) throws RocksDBException; + @Override final native void delete(final long handle, final byte[] key, + final int keyLen, final long cfHandle) throws RocksDBException; + @Override final native void singleDelete(final long handle, final byte[] key, + final int keyLen) throws RocksDBException; + @Override final native void singleDelete(final long handle, final byte[] key, + final int keyLen, final long cfHandle) throws RocksDBException; + @Override + final native void deleteDirect(final long handle, final ByteBuffer key, final int keyOffset, + final int keyLength, final long cfHandle) throws RocksDBException; + @Override + final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen, + final byte[] endKey, final int endKeyLen); + @Override + final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen, + final byte[] endKey, final int endKeyLen, final long cfHandle); + @Override final native void putLogData(final long handle, + final byte[] blob, final int blobLen) throws RocksDBException; + @Override final native void clear0(final long handle); + @Override final native void setSavePoint0(final long handle); + @Override final native void rollbackToSavePoint0(final long handle); + @Override final native void popSavePoint(final long handle) throws RocksDBException; + @Override final native void setMaxBytes(final long nativeHandle, + final long maxBytes); + + private native static long newWriteBatch(final int reserved_bytes); + private native static long newWriteBatch(final byte[] serialized, + final int serializedLength); + private native void iterate(final long handle, final long handlerHandle) + throws RocksDBException; + private native byte[] data(final long nativeHandle) throws RocksDBException; + private native long getDataSize(final long nativeHandle); + private native boolean hasPut(final long nativeHandle); + private native boolean hasDelete(final long nativeHandle); + private native boolean hasSingleDelete(final long nativeHandle); + private native boolean hasDeleteRange(final long nativeHandle); + private native boolean hasMerge(final long nativeHandle); + private native boolean hasBeginPrepare(final long nativeHandle); + 
private native boolean hasEndPrepare(final long nativeHandle);
+  private native boolean hasCommit(final long nativeHandle);
+  private native boolean hasRollback(final long nativeHandle);
+  private native void markWalTerminationPoint(final long nativeHandle);
+  private native SavePoint getWalTerminationPoint(final long nativeHandle);
+
+  /**
+   * Handler callback for iterating over the contents of a batch.
+   */
+  public static abstract class Handler
+      extends RocksCallbackObject {
+    public Handler() {
+      super(null);
+    }
+
+    @Override
+    protected long initializeNative(final long... nativeParameterHandles) {
+      return createNewHandler0();
+    }
+
+    public abstract void put(final int columnFamilyId, final byte[] key,
+        final byte[] value) throws RocksDBException;
+    public abstract void put(final byte[] key, final byte[] value);
+    public abstract void merge(final int columnFamilyId, final byte[] key,
+        final byte[] value) throws RocksDBException;
+    public abstract void merge(final byte[] key, final byte[] value);
+    public abstract void delete(final int columnFamilyId, final byte[] key)
+        throws RocksDBException;
+    public abstract void delete(final byte[] key);
+    public abstract void singleDelete(final int columnFamilyId,
+        final byte[] key) throws RocksDBException;
+    public abstract void singleDelete(final byte[] key);
+    public abstract void deleteRange(final int columnFamilyId,
+        final byte[] beginKey, final byte[] endKey) throws RocksDBException;
+    public abstract void deleteRange(final byte[] beginKey,
+        final byte[] endKey);
+    public abstract void logData(final byte[] blob);
+    public abstract void putBlobIndex(final int columnFamilyId,
+        final byte[] key, final byte[] value) throws RocksDBException;
+    public abstract void markBeginPrepare() throws RocksDBException;
+    public abstract void markEndPrepare(final byte[] xid)
+        throws RocksDBException;
+    public abstract void markNoop(final boolean emptyBatch)
+        throws RocksDBException;
+    public abstract void markRollback(final byte[] xid)
+        throws RocksDBException;
+    public abstract void markCommit(final byte[] xid)
+        throws RocksDBException;
+    public abstract void markCommitWithTimestamp(final byte[] xid, final byte[] ts)
+        throws RocksDBException;
+
+    /**
+     * shouldContinue is called by the underlying iterator
+     * {@link WriteBatch#iterate(Handler)}. If it returns false,
+     * iteration is halted. Otherwise, it continues
+     * iterating. The default implementation always
+     * returns true.
+     *
+     * @return true to continue iterating, false to halt iteration.
+     */
+    public boolean shouldContinue() {
+      return true;
+    }
+
+    private native long createNewHandler0();
+  }
+
+  /**
+   * A structure for describing the save point in the Write Batch.
+   */
+  public static class SavePoint {
+    private long size;
+    private long count;
+    private long contentFlags;
+
+    public SavePoint(final long size, final long count,
+        final long contentFlags) {
+      this.size = size;
+      this.count = count;
+      this.contentFlags = contentFlags;
+    }
+
+    public void clear() {
+      this.size = 0;
+      this.count = 0;
+      this.contentFlags = 0;
+    }
+
+    /**
+     * Get the size of the serialized representation.
+     *
+     * @return the size of the serialized representation.
+     */
+    public long getSize() {
+      return size;
+    }
+
+    /**
+     * Get the number of elements.
+     *
+     * @return the number of elements.
+     */
+    public long getCount() {
+      return count;
+    }
+
+    /**
+     * Get the content flags.
+     *
+     * @return the content flags.
+ */ + public long getContentFlags() { + return contentFlags; + } + + public boolean isCleared() { + return (size | count | contentFlags) == 0; + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java new file mode 100644 index 000000000..92caa22b3 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java @@ -0,0 +1,283 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + *
<p>Defines the interface for a Write Batch which + * holds a collection of updates to apply atomically to a DB.</p>
+ */ +public interface WriteBatchInterface { + + /** + * Returns the number of updates in the batch. + * + * @return number of items in WriteBatch + */ + int count(); + + /** + *
<p>Store the mapping "key->value" in the database.</p>
+ * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void put(byte[] key, byte[] value) throws RocksDBException; + + /** + *
<p>Store the mapping "key->value" within given column + * family.</p>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) + throws RocksDBException; + + /** + *
<p>Store the mapping "key->value" in the database.</p>
+ * + * @param key the specified key to be inserted. It is using position and limit. + * Supports direct buffer only. + * @param value the value associated with the specified key. It is using position and limit. + * Supports direct buffer only. + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void put(final ByteBuffer key, final ByteBuffer value) throws RocksDBException; + + /** + *
<p>Store the mapping "key->value" within given column + * family.</p>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the specified key to be inserted. It is using position and limit. + * Supports direct buffer only. + * @param value the value associated with the specified key. It is using position and limit. + * Supports direct buffer only. + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void put(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key, final ByteBuffer value) + throws RocksDBException; + + /** + *
<p>Merge "value" with the existing value of "key" in the database. + * "key->merge(existing, value)"</p>
+ * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void merge(byte[] key, byte[] value) throws RocksDBException; + + /** + *
<p>Merge "value" with the existing value of "key" in given column family. + * "key->merge(existing, value)"</p>
+ * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) + throws RocksDBException; + + /** + *
<p>If the database contains a mapping for "key", erase it. Else do nothing.</p>
+ * + * @param key Key to delete within database + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void delete(byte[] key) throws RocksDBException; + + /** + *
<p>If column family contains a mapping for "key", erase it. Else do nothing.</p>
+ * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key Key to delete within database + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException; + + /** + *
<p>If the database contains a mapping for "key", erase it. Else do nothing.</p>
+ * + * @param key Key to delete within database. It is using position and limit. + * Supports direct buffer only. + * + * @throws RocksDBException thrown if error happens in underlying native library. + */ + void delete(final ByteBuffer key) throws RocksDBException; + + /** + *
<p>If column family contains a mapping for "key", erase it. Else do nothing.</p>
+   *
+   * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+   * @param key Key to delete within database. It is using position and limit.
+   *     Supports direct buffer only.
+   *
+   * @throws RocksDBException thrown if error happens in underlying native library.
+   */
+  void delete(ColumnFamilyHandle columnFamilyHandle, final ByteBuffer key)
+      throws RocksDBException;
+
+  /**
+   * Remove the database entry for {@code key}. Requires that the key exists
+   * and was not overwritten. It is not an error if the key did not exist
+   * in the database.
+   *
+   * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+   * times), then the result of calling SingleDelete() on this key is undefined.
+   * SingleDelete() only behaves correctly if there has been only one Put()
+   * for this key since the previous call to SingleDelete() for this key.
+   *
+   * This feature is currently an experimental performance optimization
+   * for a very specific workload. It is up to the caller to ensure that
+   * SingleDelete is only used for a key that is not deleted using Delete() or
+   * written using Merge(). Mixing SingleDelete operations with Deletes and
+   * Merges can result in undefined behavior.
+   *
+   * @param key Key to delete within database
+   *
+   * @throws RocksDBException thrown if error happens in underlying
+   *     native library.
+   */
+  @Experimental("Performance optimization for a very specific workload")
+  void singleDelete(final byte[] key) throws RocksDBException;
+
+  /**
+   * Remove the database entry for {@code key}. Requires that the key exists
+   * and was not overwritten. It is not an error if the key did not exist
+   * in the database.
+   *
+   * If a key is overwritten (by calling {@link #put(byte[], byte[])} multiple
+   * times), then the result of calling SingleDelete() on this key is undefined.
+   * SingleDelete() only behaves correctly if there has been only one Put()
+   * for this key since the previous call to SingleDelete() for this key.
+   *
+   * This feature is currently an experimental performance optimization
+   * for a very specific workload. It is up to the caller to ensure that
+   * SingleDelete is only used for a key that is not deleted using Delete() or
+   * written using Merge(). Mixing SingleDelete operations with Deletes and
+   * Merges can result in undefined behavior.
+   *
+   * @param columnFamilyHandle The column family to delete the key from
+   * @param key Key to delete within database
+   *
+   * @throws RocksDBException thrown if error happens in underlying
+   *     native library.
+   */
+  @Experimental("Performance optimization for a very specific workload")
+  void singleDelete(final ColumnFamilyHandle columnFamilyHandle, final byte[] key)
+      throws RocksDBException;
+
+  /**
+   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+   * including "beginKey" and excluding "endKey". It is not an error if no
+   * keys exist in the range ["beginKey", "endKey").
+   *
+   * @param beginKey
+   *    First key to delete within database (included)
+   * @param endKey
+   *    Last key to delete within database (excluded)
+   * @throws RocksDBException thrown if error happens in underlying native library.
+   */
+  void deleteRange(byte[] beginKey, byte[] endKey) throws RocksDBException;
+
+  /**
+   * Removes the database entries in the range ["beginKey", "endKey"), i.e.,
+   * including "beginKey" and excluding "endKey". It is not an error if no
+   * keys exist in the range ["beginKey", "endKey").
+   *
+   * @param columnFamilyHandle {@link ColumnFamilyHandle} instance
+   * @param beginKey
+   *    First key to delete within database (included)
+   * @param endKey
+   *    Last key to delete within database (excluded)
+   * @throws RocksDBException thrown if error happens in underlying native library.
+   */
+  void deleteRange(ColumnFamilyHandle columnFamilyHandle, byte[] beginKey, byte[] endKey)
+      throws RocksDBException;
+
+  /**
+   * Append a blob of arbitrary size to the records in this batch. The blob will
+   * be stored in the transaction log but not in any other file. In particular,
+   * it will not be persisted to the SST files. When iterating over this
+   * WriteBatch, WriteBatch::Handler::LogData will be called with the contents
+   * of the blob as it is encountered. Blobs, puts, deletes, and merges will be
+   * encountered in the same order in which they were inserted. The blob will
+   * NOT consume sequence number(s) and will NOT increase the count of the batch.
+   *
+   * Example application: add timestamps to the transaction log for use in
+   * replication.
+   *
+   * @param blob binary object to be inserted
+   * @throws RocksDBException thrown if error happens in underlying native library.
+   */
+  void putLogData(byte[] blob) throws RocksDBException;
+
+  /**
+   * Clear all updates buffered in this batch.
+   */
+  void clear();
+
+  /**
+   * Records the state of the batch for future calls to RollbackToSavePoint().
+   * May be called multiple times to set multiple save points.
+   */
+  void setSavePoint();
+
+  /**
+   * Remove all entries in this batch (Put, Merge, Delete, PutLogData) since
+   * the most recent call to SetSavePoint() and removes the most recent save
+   * point.
+   *
+   * @throws RocksDBException if there is no previous call to SetSavePoint()
+   */
+  void rollbackToSavePoint() throws RocksDBException;
+
+  /**
+   * Pop the most recent save point.
+   *
+   * That is to say that it removes the last save point,
+   * which was set by {@link #setSavePoint()}.
+   *
+   * @throws RocksDBException If there is no previous call to
+   *     {@link #setSavePoint()}, an exception with
+   *     {@link Status.Code#NotFound} will be thrown.
+   */
+  void popSavePoint() throws RocksDBException;
+
+  /**
+   * Set the maximum size of the write batch.
+   *
+   * @param maxBytes the maximum size in bytes.
+   */
+  void setMaxBytes(long maxBytes);
+
+  /**
+   * Get the underlying Write Batch.
+   *
+   * @return the underlying WriteBatch.
+   */
+  WriteBatch getWriteBatch();
+}
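To make the interface above concrete, here is a minimal, hypothetical usage sketch; the database path, keys, and values are illustrative assumptions, not part of this patch:

```java
// Illustrative sketch: builds a WriteBatch through the WriteBatchInterface
// methods documented above and applies it atomically via RocksDB.write().
import org.rocksdb.*;

public class WriteBatchDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/wb-demo"); // assumed path
         final WriteBatch batch = new WriteBatch();
         final WriteOptions writeOpts = new WriteOptions()) {
      batch.put("k1".getBytes(), "v1".getBytes());
      batch.put("k2".getBytes(), "v2".getBytes());
      batch.setSavePoint();        // record the batch state
      batch.delete("k2".getBytes());
      batch.rollbackToSavePoint(); // undoes the delete; "k2" is kept
      db.write(writeOpts, batch);  // all buffered updates applied atomically
    }
  }
}
```

Note that a Handler subclass passed to WriteBatch.iterate(Handler) must implement every abstract callback, so a Handler-based iteration example is omitted here for brevity.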
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
new file mode 100644
index 000000000..c73bd7dda
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java
@@ -0,0 +1,361 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable
+ * index built for all the keys inserted.
+ *
+ * Calling put, merge, remove or putLogData calls the same function
+ * as with {@link org.rocksdb.WriteBatch} whilst also building an index.
+ *
+ * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to
+ * create an iterator over the write batch or
+ * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)}
+ * to get an iterator for the database with Read-Your-Own-Writes like capability.
+ */
+public class WriteBatchWithIndex extends AbstractWriteBatch {
+  /**
+   * Creates a WriteBatchWithIndex where no bytes
+   * are reserved up-front, bytewise comparison is
+   * used for fallback key comparisons,
+   * and duplicate key operations are retained.
+   */
+  public WriteBatchWithIndex() {
+    super(newWriteBatchWithIndex());
+  }
+
+  /**
+   * Creates a WriteBatchWithIndex where no bytes
+   * are reserved up-front, bytewise comparison is
+   * used for fallback key comparisons, and duplicate key
+   * assignment is determined by the constructor argument.
+   *
+   * @param overwriteKey if true, overwrite the key in the index when
+   *     inserting a duplicate key, in this way an iterator will never
+   *     show two entries with the same key.
+   */
+  public WriteBatchWithIndex(final boolean overwriteKey) {
+    super(newWriteBatchWithIndex(overwriteKey));
+  }
+
+  /**
+   * Creates a WriteBatchWithIndex.
+   *
+   * @param fallbackIndexComparator We fall back to this comparator
+   *     to compare keys within a column family if we cannot determine
+   *     the column family and thus cannot look up its comparator.
+   *
+   * @param reservedBytes reserved bytes in underlying WriteBatch
+   *
+   * @param overwriteKey if true, overwrite the key in the index when
+   *     inserting a duplicate key, in this way an iterator will never
+   *     show two entries with the same key.
+   */
+  public WriteBatchWithIndex(
+      final AbstractComparator
+          fallbackIndexComparator, final int reservedBytes,
+      final boolean overwriteKey) {
+    super(newWriteBatchWithIndex(fallbackIndexComparator.nativeHandle_,
+        fallbackIndexComparator.getComparatorType().getValue(), reservedBytes,
+        overwriteKey));
+  }
+
+  /**
+   * <p>Private WriteBatchWithIndex constructor which is used to construct
+   * WriteBatchWithIndex instances from C++ side. As the reference to this
+   * object is also managed from C++ side the handle will be disowned.</p>
+ * + * @param nativeHandle address of native instance. + */ + WriteBatchWithIndex(final long nativeHandle) { + super(nativeHandle); + disOwnNativeHandle(); + } + + /** + * Create an iterator of a column family. User can call + * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to + * search to the next entry of or after a key. Keys will be iterated in the + * order given by index_comparator. For multiple updates on the same key, + * each update will be returned as a separate entry, in the order of update + * time. + * + * @param columnFamilyHandle The column family to iterate over + * @return An iterator for the Write Batch contents, restricted to the column + * family + */ + public WBWIRocksIterator newIterator( + final ColumnFamilyHandle columnFamilyHandle) { + return new WBWIRocksIterator(this, iterator1(nativeHandle_, + columnFamilyHandle.nativeHandle_)); + } + + /** + * Create an iterator of the default column family. User can call + * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to + * search to the next entry of or after a key. Keys will be iterated in the + * order given by index_comparator. For multiple updates on the same key, + * each update will be returned as a separate entry, in the order of update + * time. + * + * @return An iterator for the Write Batch contents + */ + public WBWIRocksIterator newIterator() { + return new WBWIRocksIterator(this, iterator0(nativeHandle_)); + } + + /** + * Provides Read-Your-Own-Writes like functionality by + * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} + * as a delta and baseIterator as a base + * + * Updating write batch with the current key of the iterator is not safe. + * We strongly recommend users not to do it. It will invalidate the current + * key() and value() of the iterator. This invalidation happens even before + * the write batch update finishes. The state may recover after Next() is + * called. + * + * @param columnFamilyHandle The column family to iterate over + * @param baseIterator The base iterator, + * e.g. {@link org.rocksdb.RocksDB#newIterator()} + * @return An iterator which shows a view comprised of both the database + * point-in-time from baseIterator and modifications made in this write batch. + */ + public RocksIterator newIteratorWithBase( + final ColumnFamilyHandle columnFamilyHandle, + final RocksIterator baseIterator) { + return newIteratorWithBase(columnFamilyHandle, baseIterator, null); + } + + /** + * Provides Read-Your-Own-Writes like functionality by + * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} + * as a delta and baseIterator as a base + * + * Updating write batch with the current key of the iterator is not safe. + * We strongly recommend users not to do it. It will invalidate the current + * key() and value() of the iterator. This invalidation happens even before + * the write batch update finishes. The state may recover after Next() is + * called. + * + * @param columnFamilyHandle The column family to iterate over + * @param baseIterator The base iterator, + * e.g. {@link org.rocksdb.RocksDB#newIterator()} + * @param readOptions the read options, or null + * @return An iterator which shows a view comprised of both the database + * point-in-time from baseIterator and modifications made in this write batch. 
+   */
+  public RocksIterator newIteratorWithBase(final ColumnFamilyHandle columnFamilyHandle,
+      final RocksIterator baseIterator, /* @Nullable */ final ReadOptions readOptions) {
+    final RocksIterator iterator = new RocksIterator(baseIterator.parent_,
+        iteratorWithBase(nativeHandle_, columnFamilyHandle.nativeHandle_,
+            baseIterator.nativeHandle_, readOptions == null ? 0 : readOptions.nativeHandle_));
+
+    // when the iterator is deleted it will also delete the baseIterator
+    baseIterator.disOwnNativeHandle();
+
+    return iterator;
+  }
+
+  /**
+   * Provides Read-Your-Own-Writes like functionality by
+   * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
+   * as a delta and baseIterator as a base. Operates on the default column
+   * family.
+   *
+   * @param baseIterator The base iterator,
+   *     e.g. {@link org.rocksdb.RocksDB#newIterator()}
+   * @return An iterator which shows a view comprised of both the database
+   *     point-in-time from baseIterator and modifications made in this write batch.
+   */
+  public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
+    return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator, null);
+  }
+
+  /**
+   * Provides Read-Your-Own-Writes like functionality by
+   * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
+   * as a delta and baseIterator as a base. Operates on the default column
+   * family.
+   *
+   * @param baseIterator The base iterator,
+   *     e.g. {@link org.rocksdb.RocksDB#newIterator()}
+   * @param readOptions the read options, or null
+   * @return An iterator which shows a view comprised of both the database
+   *     point-in-time from baseIterator and modifications made in this write batch.
+   */
+  public RocksIterator newIteratorWithBase(final RocksIterator baseIterator,
+      /* @Nullable */ final ReadOptions readOptions) {
+    return newIteratorWithBase(
+        baseIterator.parent_.getDefaultColumnFamily(), baseIterator, readOptions);
+  }
+
+  /**
+   * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will only
+   * read the key from this batch.
+   *
+   * @param columnFamilyHandle The column family to retrieve the value from
+   * @param options The database options to use
+   * @param key The key to read the value for
+   *
+   * @return a byte array storing the value associated with the input key if
+   *     any. null if it does not find the specified key.
+   *
+   * @throws RocksDBException if the batch does not have enough data to resolve
+   *     Merge operations, MergeInProgress status may be returned.
+   */
+  public byte[] getFromBatch(final ColumnFamilyHandle columnFamilyHandle,
+      final DBOptions options, final byte[] key) throws RocksDBException {
+    return getFromBatch(nativeHandle_, options.nativeHandle_,
+        key, key.length, columnFamilyHandle.nativeHandle_);
+  }
+
+  /**
+   * Similar to {@link RocksDB#get(byte[])} but will only
+   * read the key from this batch.
+   *
+   * @param options The database options to use
+   * @param key The key to read the value for
+   *
+   * @return a byte array storing the value associated with the input key if
+   *     any. null if it does not find the specified key.
+   *
+   * @throws RocksDBException if the batch does not have enough data to resolve
+   *     Merge operations, MergeInProgress status may be returned.
+ */ + public byte[] getFromBatch(final DBOptions options, final byte[] key) + throws RocksDBException { + return getFromBatch(nativeHandle_, options.nativeHandle_, key, key.length); + } + + /** + * Similar to {@link RocksDB#get(ColumnFamilyHandle, byte[])} but will also + * read writes from this batch. + * + * This function will query both this batch and the DB and then merge + * the results using the DB's merge operator (if the batch contains any + * merge requests). + * + * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is + * read from the DB but will NOT change which keys are read from the batch + * (the keys in this batch do not yet belong to any snapshot and will be + * fetched regardless). + * + * @param db The Rocks database + * @param columnFamilyHandle The column family to retrieve the value from + * @param options The read options to use + * @param key The key to read the value for + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException if the value for the key cannot be read + */ + public byte[] getFromBatchAndDB(final RocksDB db, final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions options, final byte[] key) throws RocksDBException { + return getFromBatchAndDB(nativeHandle_, db.nativeHandle_, + options.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Similar to {@link RocksDB#get(byte[])} but will also + * read writes from this batch. + * + * This function will query both this batch and the DB and then merge + * the results using the DB's merge operator (if the batch contains any + * merge requests). + * + * Setting {@link ReadOptions#setSnapshot(Snapshot)} will affect what is + * read from the DB but will NOT change which keys are read from the batch + * (the keys in this batch do not yet belong to any snapshot and will be + * fetched regardless). + * + * @param db The Rocks database + * @param options The read options to use + * @param key The key to read the value for + * + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. 
+   *
+   * @throws RocksDBException if the value for the key cannot be read
+   */
+  public byte[] getFromBatchAndDB(final RocksDB db, final ReadOptions options,
+      final byte[] key) throws RocksDBException {
+    return getFromBatchAndDB(nativeHandle_, db.nativeHandle_,
+        options.nativeHandle_, key, key.length);
+  }
+
+  @Override protected final native void disposeInternal(final long handle);
+  @Override final native int count0(final long handle);
+  @Override final native void put(final long handle, final byte[] key,
+      final int keyLen, final byte[] value, final int valueLen);
+  @Override final native void put(final long handle, final byte[] key,
+      final int keyLen, final byte[] value, final int valueLen,
+      final long cfHandle);
+  @Override
+  final native void putDirect(final long handle, final ByteBuffer key, final int keyOffset,
+      final int keyLength, final ByteBuffer value, final int valueOffset, final int valueLength,
+      final long cfHandle);
+  @Override final native void merge(final long handle, final byte[] key,
+      final int keyLen, final byte[] value, final int valueLen);
+  @Override final native void merge(final long handle, final byte[] key,
+      final int keyLen, final byte[] value, final int valueLen,
+      final long cfHandle);
+  @Override final native void delete(final long handle, final byte[] key,
+      final int keyLen) throws RocksDBException;
+  @Override final native void delete(final long handle, final byte[] key,
+      final int keyLen, final long cfHandle) throws RocksDBException;
+  @Override final native void singleDelete(final long handle, final byte[] key,
+      final int keyLen) throws RocksDBException;
+  @Override final native void singleDelete(final long handle, final byte[] key,
+      final int keyLen, final long cfHandle) throws RocksDBException;
+  @Override
+  final native void deleteDirect(final long handle, final ByteBuffer key, final int keyOffset,
+      final int keyLength, final long cfHandle) throws RocksDBException;
+  // DO NOT USE - `WriteBatchWithIndex::deleteRange` is not yet supported
+  @Override
+  final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+      final byte[] endKey, final int endKeyLen);
+  // DO NOT USE - `WriteBatchWithIndex::deleteRange` is not yet supported
+  @Override
+  final native void deleteRange(final long handle, final byte[] beginKey, final int beginKeyLen,
+      final byte[] endKey, final int endKeyLen, final long cfHandle);
+  @Override final native void putLogData(final long handle, final byte[] blob,
+      final int blobLen) throws RocksDBException;
+  @Override final native void clear0(final long handle);
+  @Override final native void setSavePoint0(final long handle);
+  @Override final native void rollbackToSavePoint0(final long handle);
+  @Override final native void popSavePoint(final long handle) throws RocksDBException;
+  @Override final native void setMaxBytes(final long nativeHandle,
+      final long maxBytes);
+  @Override final native WriteBatch getWriteBatch(final long handle);
+
+  private native static long newWriteBatchWithIndex();
+  private native static long newWriteBatchWithIndex(final boolean overwriteKey);
+  private native static long newWriteBatchWithIndex(
+      final long fallbackIndexComparatorHandle,
+      final byte comparatorType, final int reservedBytes,
+      final boolean overwriteKey);
+  private native long iterator0(final long handle);
+  private native long iterator1(final long handle, final long cfHandle);
+  // note: parameter order matches the JNI implementation and the caller above:
+  // (wbwi handle, column family handle, base iterator handle, read options handle)
+  private native long iteratorWithBase(final long handle, final long cfHandle,
+      final long baseIteratorHandle, final long readOptionsHandle);
+  private native byte[] getFromBatch(final long handle, final long optHandle,
+      final byte[] key, final int keyLen);
+  private native byte[] getFromBatch(final long handle, final long optHandle,
+      final byte[] key, final int keyLen, final long cfHandle);
+  private native byte[] getFromBatchAndDB(final long handle,
+      final long dbHandle, final long readOptHandle, final byte[] key,
+      final int keyLen);
+  private native byte[] getFromBatchAndDB(final long handle,
+      final long dbHandle, final long readOptHandle, final byte[] key,
+      final int keyLen, final long cfHandle);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java
new file mode 100644
index 000000000..8ec963958
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteBufferManager.java
@@ -0,0 +1,50 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Java wrapper over native write_buffer_manager class
+ */
+public class WriteBufferManager extends RocksObject {
+  static {
+    RocksDB.loadLibrary();
+  }
+
+  /**
+   * Construct a new instance of WriteBufferManager.
+   *
+   * Check
+   * https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager
+   * for more details on when to use it.
+   *
+   * @param bufferSizeBytes buffer size (in bytes) to use for native write_buffer_manager
+   * @param cache cache whose memory should be bounded by this write buffer manager
+   * @param allowStall if set to true, stalls writes when memory_usage() exceeds
+   *     buffer_size: the write waits for the flush to complete and for memory usage to drop.
+   */
+  public WriteBufferManager(
+      final long bufferSizeBytes, final Cache cache, final boolean allowStall) {
+    super(newWriteBufferManager(bufferSizeBytes, cache.nativeHandle_, allowStall));
+    this.allowStall_ = allowStall;
+  }
+
+  public WriteBufferManager(final long bufferSizeBytes, final Cache cache) {
+    this(bufferSizeBytes, cache, false);
+  }
+
+  public boolean allowStall() {
+    return allowStall_;
+  }
+
+  private native static long newWriteBufferManager(
+      final long bufferSizeBytes, final long cacheHandle, final boolean allowStall);
+
+  @Override
+  protected native void disposeInternal(final long handle);
+
+  private boolean allowStall_;
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java
new file mode 100644
index 000000000..5a3ffa6c5
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java
@@ -0,0 +1,256 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+/**
+ * Options that control write operations.
+ *
+ * Note that developers should call WriteOptions.dispose() to release the
+ * c++ side memory before a WriteOptions instance runs out of scope.
+ */
+public class WriteOptions extends RocksObject {
+  /**
+   * Construct WriteOptions instance.
+   */
+  public WriteOptions() {
+    super(newWriteOptions());
+  }
+
+  // TODO(AR) consider ownership
+  WriteOptions(final long nativeHandle) {
+    super(nativeHandle);
+    disOwnNativeHandle();
+  }
+
+  /**
+   * Copy constructor for WriteOptions.
+   *
+   * NOTE: This does a shallow copy, which means comparator, merge_operator, compaction_filter,
+   * compaction_filter_factory and other pointers will be cloned!
+   *
+   * @param other The WriteOptions to copy.
+   */
+  public WriteOptions(WriteOptions other) {
+    super(copyWriteOptions(other.nativeHandle_));
+  }
+
+  /**
+   * If true, the write will be flushed from the operating system
+   * buffer cache (by calling WritableFile::Sync()) before the write
+   * is considered complete. If this flag is true, writes will be
+   * slower.
+   *
+   * If this flag is false, and the machine crashes, some recent
+   * writes may be lost. Note that if it is just the process that
+   * crashes (i.e., the machine does not reboot), no writes will be
+   * lost even if sync==false.
+   *
+   * In other words, a DB write with sync==false has similar
+   * crash semantics as the "write()" system call. A DB write
+   * with sync==true has similar crash semantics to a "write()"
+   * system call followed by "fdatasync()".
+   *
+   * Default: false
+   *
+   * @param flag a boolean flag to indicate whether a write
+   *     should be synchronized.
+   * @return the instance of the current WriteOptions.
+   */
+  public WriteOptions setSync(final boolean flag) {
+    setSync(nativeHandle_, flag);
+    return this;
+  }
+
+  /**
+   * If true, the write will be flushed from the operating system
+   * buffer cache (by calling WritableFile::Sync()) before the write
+   * is considered complete. If this flag is true, writes will be
+   * slower.
+   *
+   * If this flag is false, and the machine crashes, some recent
+   * writes may be lost. Note that if it is just the process that
+   * crashes (i.e., the machine does not reboot), no writes will be
+   * lost even if sync==false.
+   *
+   * In other words, a DB write with sync==false has similar
+   * crash semantics as the "write()" system call. A DB write
+   * with sync==true has similar crash semantics to a "write()"
+   * system call followed by "fdatasync()".
+   *
+   * @return boolean value indicating if sync is active.
+   */
+  public boolean sync() {
+    return sync(nativeHandle_);
+  }
+
+  /**
+   * If true, writes will not first go to the write ahead log,
+   * and the write may get lost after a crash. The backup engine
+   * relies on write-ahead logs to back up the memtable, so if
+   * you disable write-ahead logs, you must create backups with
+   * flush_before_backup=true to avoid losing unflushed memtable data.
+   *
+   * @param flag a boolean flag to specify whether to disable
+   *     write-ahead-log on writes.
+   * @return the instance of the current WriteOptions.
+   */
+  public WriteOptions setDisableWAL(final boolean flag) {
+    setDisableWAL(nativeHandle_, flag);
+    return this;
+  }
+
+  /**
+   * If true, writes will not first go to the write ahead log,
+   * and the write may get lost after a crash. The backup engine
+   * relies on write-ahead logs to back up the memtable, so if
+   * you disable write-ahead logs, you must create backups with
+   * flush_before_backup=true to avoid losing unflushed memtable data.
+   *
+   * @return boolean value indicating if WAL is disabled.
+   */
+  public boolean disableWAL() {
+    return disableWAL(nativeHandle_);
+  }
+
+  /**
+   * If true and if user is trying to write to column families that don't exist
+   * (they were dropped), ignore the write (don't return an error). If there
+   * are multiple writes in a WriteBatch, other writes will succeed.
+   *
+   * Default: false
+   *
+   * @param ignoreMissingColumnFamilies true to ignore writes to column families
+   *     which don't exist
+   * @return the instance of the current WriteOptions.
+   */
+  public WriteOptions setIgnoreMissingColumnFamilies(
+      final boolean ignoreMissingColumnFamilies) {
+    setIgnoreMissingColumnFamilies(nativeHandle_, ignoreMissingColumnFamilies);
+    return this;
+  }
+
+  /**
+   * If true and if user is trying to write to column families that don't exist
+   * (they were dropped), ignore the write (don't return an error). If there
+   * are multiple writes in a WriteBatch, other writes will succeed.
+   *
+   * Default: false
+   *
+   * @return true if writes to column families which don't exist are ignored
+   */
+  public boolean ignoreMissingColumnFamilies() {
+    return ignoreMissingColumnFamilies(nativeHandle_);
+  }
+
+  /**
+   * If true and we need to wait or sleep for the write request, fails
+   * immediately with {@link Status.Code#Incomplete}.
+   *
+   * @param noSlowdown true to fail write requests if we need to wait or sleep
+   * @return the instance of the current WriteOptions.
+   */
+  public WriteOptions setNoSlowdown(final boolean noSlowdown) {
+    setNoSlowdown(nativeHandle_, noSlowdown);
+    return this;
+  }
+
+  /**
+   * If true and we need to wait or sleep for the write request, fails
+   * immediately with {@link Status.Code#Incomplete}.
+   *
+   * @return true when write requests are failed if we need to wait or sleep
+   */
+  public boolean noSlowdown() {
+    return noSlowdown(nativeHandle_);
+  }
+
+  /**
+   * If true, this write request is of lower priority if compaction is
+   * behind. If {@link #noSlowdown()} == true, the request
+   * will be cancelled immediately with {@link Status.Code#Incomplete} returned.
+   * Otherwise, it will be slowed down. The slowdown value is determined by
+   * RocksDB to guarantee it introduces minimal impact on high-priority writes.
+   *
+   * Default: false
+   *
+   * @param lowPri true if the write request should be of lower priority than
+   *     compactions which are behind.
+   *
+   * @return the instance of the current WriteOptions.
+   */
+  public WriteOptions setLowPri(final boolean lowPri) {
+    setLowPri(nativeHandle_, lowPri);
+    return this;
+  }
+
+  /**
+   * Returns true if this write request is of lower priority if compaction is
+   * behind.
+   *
+   * See {@link #setLowPri(boolean)}.
+   *
+   * @return true if this write request is of lower priority, false otherwise.
+   */
+  public boolean lowPri() {
+    return lowPri(nativeHandle_);
+  }
+
+  /**
+   * If true, this writebatch will maintain the last insert positions of each
+   * memtable as hints in concurrent write. It can improve write performance
+   * in concurrent writes if keys in one writebatch are sequential. In
+   * non-concurrent writes (when {@code concurrent_memtable_writes} is false) this
+   * option will be ignored.
+   *
+   * Default: false
+   *
+   * @return true if writebatch will maintain the last insert positions of each memtable as
+   *     hints in concurrent write.
+   */
+  public boolean memtableInsertHintPerBatch() {
+    return memtableInsertHintPerBatch(nativeHandle_);
+  }
+
+  /**
+   * If true, this writebatch will maintain the last insert positions of each
+   * memtable as hints in concurrent write. It can improve write performance
+   * in concurrent writes if keys in one writebatch are sequential. In
+   * non-concurrent writes (when {@code concurrent_memtable_writes} is false) this
+   * option will be ignored.
+   *
+   * Default: false
+   *
+   * @param memtableInsertHintPerBatch true if writebatch should maintain the last insert positions
+   *     of each memtable as hints in concurrent write.
+   * @return the instance of the current WriteOptions.
+   */
+  public WriteOptions setMemtableInsertHintPerBatch(final boolean memtableInsertHintPerBatch) {
+    setMemtableInsertHintPerBatch(nativeHandle_, memtableInsertHintPerBatch);
+    return this;
+  }
+
+  private native static long newWriteOptions();
+  private native static long copyWriteOptions(long handle);
+  @Override protected final native void disposeInternal(final long handle);
+
+  private native void setSync(long handle, boolean flag);
+  private native boolean sync(long handle);
+  private native void setDisableWAL(long handle, boolean flag);
+  private native boolean disableWAL(long handle);
+  private native void setIgnoreMissingColumnFamilies(final long handle,
+      final boolean ignoreMissingColumnFamilies);
+  private native boolean ignoreMissingColumnFamilies(final long handle);
+  private native void setNoSlowdown(final long handle,
+      final boolean noSlowdown);
+  private native boolean noSlowdown(final long handle);
+  private native void setLowPri(final long handle, final boolean lowPri);
+  private native boolean lowPri(final long handle);
+  private native boolean memtableInsertHintPerBatch(final long handle);
+  private native void setMemtableInsertHintPerBatch(
+      final long handle, final boolean memtableInsertHintPerBatch);
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteStallCondition.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteStallCondition.java
new file mode 100644
index 000000000..3bc9d4104
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteStallCondition.java
@@ -0,0 +1,44 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+public enum WriteStallCondition {
+  NORMAL((byte) 0x0),
+  DELAYED((byte) 0x1),
+  STOPPED((byte) 0x2);
+
+  private final byte value;
+
+  WriteStallCondition(final byte value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the internal representation.
+   *
+   * @return the internal representation
+   */
+  byte getValue() {
+    return value;
+  }
+
+  /**
+   * Get the WriteStallCondition from the internal representation value.
+   *
+   * @return the write stall condition.
+   *
+   * @throws IllegalArgumentException if the value is unknown.
+   */
+  static WriteStallCondition fromValue(final byte value) {
+    for (final WriteStallCondition writeStallCondition : WriteStallCondition.values()) {
+      if (writeStallCondition.value == value) {
+        return writeStallCondition;
+      }
+    }
+
+    throw new IllegalArgumentException("Illegal value provided for WriteStallCondition: " + value);
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/WriteStallInfo.java b/src/rocksdb/java/src/main/java/org/rocksdb/WriteStallInfo.java
new file mode 100644
index 000000000..4aef0eda9
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/WriteStallInfo.java
@@ -0,0 +1,75 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
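A brief, hypothetical sketch of the WriteOptions knobs documented above; the chosen values and the database path are arbitrary illustrations, not recommendations:

```java
// Illustrative sketch: configuring WriteOptions for a single write.
import org.rocksdb.*;

public class WriteOptionsDemo {
  public static void main(final String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (final Options options = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(options, "/tmp/wo-demo"); // assumed path
         final WriteOptions writeOpts = new WriteOptions()
             .setSync(true)        // "write()" + "fdatasync()" crash semantics
             .setDisableWAL(false) // keep the WAL (needed to back up unflushed data)
             .setNoSlowdown(true)  // fail with Status.Code.Incomplete rather than stall
             .setLowPri(true)) {   // deprioritise this write when compaction is behind
      db.put(writeOpts, "key".getBytes(), "value".getBytes());
    }
  }
}
```

With noSlowdown set, a write that would otherwise have to wait surfaces as a RocksDBException carrying an Incomplete status, so callers should be prepared to retry.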
+ +package org.rocksdb; + +import java.util.Objects; + +public class WriteStallInfo { + private final String columnFamilyName; + private final WriteStallCondition currentCondition; + private final WriteStallCondition previousCondition; + + /** + * Access is package private as this will only be constructed from + * C++ via JNI and for testing. + */ + WriteStallInfo(final String columnFamilyName, final byte currentConditionValue, + final byte previousConditionValue) { + this.columnFamilyName = columnFamilyName; + this.currentCondition = WriteStallCondition.fromValue(currentConditionValue); + this.previousCondition = WriteStallCondition.fromValue(previousConditionValue); + } + + /** + * Get the name of the column family. + * + * @return the name of the column family. + */ + public String getColumnFamilyName() { + return columnFamilyName; + } + + /** + * Get the current state of the write controller. + * + * @return the current state. + */ + public WriteStallCondition getCurrentCondition() { + return currentCondition; + } + + /** + * Get the previous state of the write controller. + * + * @return the previous state. + */ + public WriteStallCondition getPreviousCondition() { + return previousCondition; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + WriteStallInfo that = (WriteStallInfo) o; + return Objects.equals(columnFamilyName, that.columnFamilyName) + && currentCondition == that.currentCondition && previousCondition == that.previousCondition; + } + + @Override + public int hashCode() { + return Objects.hash(columnFamilyName, currentCondition, previousCondition); + } + + @Override + public String toString() { + return "WriteStallInfo{" + + "columnFamilyName='" + columnFamilyName + '\'' + ", currentCondition=" + currentCondition + + ", previousCondition=" + previousCondition + '}'; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java new file mode 100644 index 000000000..5d64d5dcf --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/ByteUtil.java @@ -0,0 +1,52 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import java.nio.ByteBuffer; + +import static java.nio.charset.StandardCharsets.UTF_8; + +public class ByteUtil { + + /** + * Convert a String to a UTF-8 byte array. + * + * @param str the string + * + * @return the byte array. + */ + public static byte[] bytes(final String str) { + return str.getBytes(UTF_8); + } + + /** + * Compares the first {@code count} bytes of two areas of memory. Returns + * zero if they are the same, a value less than zero if {@code x} is + * lexically less than {@code y}, or a value greater than zero if {@code x} + * is lexically greater than {@code y}. Note that lexical order is determined + * as if comparing unsigned char arrays. + * + * Similar to memcmp.c. 
+ * + * @param x the first value to compare with + * @param y the second value to compare against + * @param count the number of bytes to compare + * + * @return the result of the comparison + */ + public static int memcmp(final ByteBuffer x, final ByteBuffer y, + final int count) { + for (int idx = 0; idx < count; idx++) { + final int aa = x.get(idx) & 0xff; + final int bb = y.get(idx) & 0xff; + if (aa != bb) { + return aa - bb; + } + } + return 0; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java new file mode 100644 index 000000000..9561b0a31 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java @@ -0,0 +1,121 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import org.rocksdb.*; + +import java.nio.ByteBuffer; + +import static org.rocksdb.util.ByteUtil.memcmp; + +/** + * This is a Java Native implementation of the C++ + * equivalent BytewiseComparatorImpl using {@link Slice} + * + * The performance of Comparators implemented in Java is always + * less than their C++ counterparts due to the bridging overhead, + * as such you likely don't want to use this apart from benchmarking + * and you most likely instead wanted + * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR} + */ +public final class BytewiseComparator extends AbstractComparator { + + public BytewiseComparator(final ComparatorOptions copt) { + super(copt); + } + + @Override + public String name() { + return "rocksdb.java.BytewiseComparator"; + } + + @Override + public int compare(final ByteBuffer a, final ByteBuffer b) { + return _compare(a, b); + } + + static int _compare(final ByteBuffer a, final ByteBuffer b) { + assert(a != null && b != null); + final int minLen = a.remaining() < b.remaining() ? + a.remaining() : b.remaining(); + int r = memcmp(a, b, minLen); + if (r == 0) { + if (a.remaining() < b.remaining()) { + r = -1; + } else if (a.remaining() > b.remaining()) { + r = +1; + } + } + return r; + } + + @Override + public void findShortestSeparator(final ByteBuffer start, + final ByteBuffer limit) { + // Find length of common prefix + final int minLength = Math.min(start.remaining(), limit.remaining()); + int diffIndex = 0; + while (diffIndex < minLength && + start.get(diffIndex) == limit.get(diffIndex)) { + diffIndex++; + } + + if (diffIndex >= minLength) { + // Do not shorten if one string is a prefix of the other + } else { + final int startByte = start.get(diffIndex) & 0xff; + final int limitByte = limit.get(diffIndex) & 0xff; + if (startByte >= limitByte) { + // Cannot shorten since limit is smaller than start or start is + // already the shortest possible. + return; + } + assert(startByte < limitByte); + + if (diffIndex < limit.remaining() - 1 || startByte + 1 < limitByte) { + start.put(diffIndex, (byte)((start.get(diffIndex) & 0xff) + 1)); + start.limit(diffIndex + 1); + } else { + // v + // A A 1 A A A + // A A 2 + // + // Incrementing the current byte will make start bigger than limit, we + // will skip this byte, and find the first non 0xFF byte in start and + // increment it. 
+ diffIndex++; + + while (diffIndex < start.remaining()) { + // Keep moving until we find the first non 0xFF byte to + // increment it + if ((start.get(diffIndex) & 0xff) < + 0xff) { + start.put(diffIndex, (byte)((start.get(diffIndex) & 0xff) + 1)); + start.limit(diffIndex + 1); + break; + } + diffIndex++; + } + } + assert(compare(start.duplicate(), limit.duplicate()) < 0); + } + } + + @Override + public void findShortSuccessor(final ByteBuffer key) { + // Find first character that can be incremented + final int n = key.remaining(); + for (int i = 0; i < n; i++) { + final int byt = key.get(i) & 0xff; + if (byt != 0xff) { + key.put(i, (byte)(byt + 1)); + key.limit(i+1); + return; + } + } + // *key is a run of 0xffs. Leave it alone. + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java new file mode 100644 index 000000000..9ad51c7c7 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java @@ -0,0 +1,245 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb.util; + +import java.io.File; +import java.io.IOException; + +public class Environment { + private static String OS = System.getProperty("os.name").toLowerCase(); + private static String ARCH = System.getProperty("os.arch").toLowerCase(); + private static String MUSL_ENVIRONMENT = System.getenv("ROCKSDB_MUSL_LIBC"); + + /** + * Will be lazily initialised by {@link #isMuslLibc()} instead of the previous static + * initialisation. The lazy initialisation prevents Windows from reporting suspicious behaviour of + * the JVM attempting IO on Unix paths. + */ + private static Boolean MUSL_LIBC = null; + + public static boolean isAarch64() { + return ARCH.contains("aarch64"); + } + + public static boolean isPowerPC() { + return ARCH.contains("ppc"); + } + + public static boolean isS390x() { + return ARCH.contains("s390x"); + } + + public static boolean isWindows() { + return (OS.contains("win")); + } + + public static boolean isFreeBSD() { + return (OS.contains("freebsd")); + } + + public static boolean isMac() { + return (OS.contains("mac")); + } + + public static boolean isAix() { + return OS.contains("aix"); + } + + public static boolean isUnix() { + return OS.contains("nix") || + OS.contains("nux"); + } + + /** + * Determine if the environment has a musl libc. + * + * @return true if the environment has a musl libc, false otherwise. + */ + public static boolean isMuslLibc() { + if (MUSL_LIBC == null) { + MUSL_LIBC = initIsMuslLibc(); + } + return MUSL_LIBC; + } + + /** + * Determine if the environment has a musl libc. + * + * The initialisation counterpart of {@link #isMuslLibc()}. + * + * Intentionally package-private for testing. + * + * @return true if the environment has a musl libc, false otherwise. 
+ */ + static boolean initIsMuslLibc() { + // consider explicit user setting from environment first + if ("true".equalsIgnoreCase(MUSL_ENVIRONMENT)) { + return true; + } + if ("false".equalsIgnoreCase(MUSL_ENVIRONMENT)) { + return false; + } + + // check if ldd indicates a muslc lib + try { + final Process p = + new ProcessBuilder("/usr/bin/env", "sh", "-c", "ldd /usr/bin/env | grep -q musl").start(); + if (p.waitFor() == 0) { + return true; + } + } catch (final IOException | InterruptedException e) { + // do nothing, and move on to the next check + } + + final File lib = new File("/lib"); + if (lib.exists() && lib.isDirectory() && lib.canRead()) { + // attempt the most likely musl libc name first + final String possibleMuslcLibName; + if (isPowerPC()) { + possibleMuslcLibName = "libc.musl-ppc64le.so.1"; + } else if (isAarch64()) { + possibleMuslcLibName = "libc.musl-aarch64.so.1"; + } else if (isS390x()) { + possibleMuslcLibName = "libc.musl-s390x.so.1"; + } else { + possibleMuslcLibName = "libc.musl-x86_64.so.1"; + } + final File possibleMuslcLib = new File(lib, possibleMuslcLibName); + if (possibleMuslcLib.exists() && possibleMuslcLib.canRead()) { + return true; + } + + // fallback to scanning for a musl libc + final File[] libFiles = lib.listFiles(); + if (libFiles == null) { + return false; + } + for (final File f : libFiles) { + if (f.getName().startsWith("libc.musl")) { + return true; + } + } + } + + return false; + } + + public static boolean isSolaris() { + return OS.contains("sunos"); + } + + public static boolean isOpenBSD() { + return (OS.contains("openbsd")); + } + + public static boolean is64Bit() { + if (ARCH.indexOf("sparcv9") >= 0) { + return true; + } + return (ARCH.indexOf("64") > 0); + } + + public static String getSharedLibraryName(final String name) { + return name + "jni"; + } + + public static String getSharedLibraryFileName(final String name) { + return appendLibOsSuffix("lib" + getSharedLibraryName(name), true); + } + + /** + * Get the name of the libc implementation + * + * @return the name of the implementation, + * or null if the default for that platform (e.g. glibc on Linux). + */ + public static /* @Nullable */ String getLibcName() { + if (isMuslLibc()) { + return "musl"; + } else { + return null; + } + } + + private static String getLibcPostfix() { + final String libcName = getLibcName(); + if (libcName == null) { + return ""; + } + return "-" + libcName; + } + + public static String getJniLibraryName(final String name) { + if (isUnix()) { + final String arch = is64Bit() ? "64" : "32"; + if (isPowerPC() || isAarch64()) { + return String.format("%sjni-linux-%s%s", name, ARCH, getLibcPostfix()); + } else if (isS390x()) { + return String.format("%sjni-linux-%s", name, ARCH); + } else { + return String.format("%sjni-linux%s%s", name, arch, getLibcPostfix()); + } + } else if (isMac()) { + if (is64Bit()) { + final String arch; + if (isAarch64()) { + arch = "arm64"; + } else { + arch = "x86_64"; + } + return String.format("%sjni-osx-%s", name, arch); + } else { + return String.format("%sjni-osx", name); + } + } else if (isFreeBSD()) { + return String.format("%sjni-freebsd%s", name, is64Bit() ? "64" : "32"); + } else if (isAix() && is64Bit()) { + return String.format("%sjni-aix64", name); + } else if (isSolaris()) { + final String arch = is64Bit() ? 
"64" : "32"; + return String.format("%sjni-solaris%s", name, arch); + } else if (isWindows() && is64Bit()) { + return String.format("%sjni-win64", name); + } else if (isOpenBSD()) { + return String.format("%sjni-openbsd%s", name, is64Bit() ? "64" : "32"); + } + + throw new UnsupportedOperationException(String.format("Cannot determine JNI library name for ARCH='%s' OS='%s' name='%s'", ARCH, OS, name)); + } + + public static /*@Nullable*/ String getFallbackJniLibraryName(final String name) { + if (isMac() && is64Bit()) { + return String.format("%sjni-osx", name); + } + return null; + } + + public static String getJniLibraryFileName(final String name) { + return appendLibOsSuffix("lib" + getJniLibraryName(name), false); + } + + public static /*@Nullable*/ String getFallbackJniLibraryFileName(final String name) { + final String fallbackJniLibraryName = getFallbackJniLibraryName(name); + if (fallbackJniLibraryName == null) { + return null; + } + return appendLibOsSuffix("lib" + fallbackJniLibraryName, false); + } + + private static String appendLibOsSuffix(final String libraryFileName, final boolean shared) { + if (isUnix() || isAix() || isSolaris() || isFreeBSD() || isOpenBSD()) { + return libraryFileName + ".so"; + } else if (isMac()) { + return libraryFileName + (shared ? ".dylib" : ".jnilib"); + } else if (isWindows()) { + return libraryFileName + ".dll"; + } + throw new UnsupportedOperationException(); + } + + public static String getJniLibraryExtension() { + if (isWindows()) { + return ".dll"; + } + return (isMac()) ? ".jnilib" : ".so"; + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java new file mode 100644 index 000000000..cc096cd14 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/IntComparator.java @@ -0,0 +1,67 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import org.rocksdb.AbstractComparator; +import org.rocksdb.ComparatorOptions; + +import java.nio.ByteBuffer; + +/** + * This is a Java implementation of a Comparator for Java int + * keys. + * + * This comparator assumes keys are (at least) four bytes, so + * the caller must guarantee that in accessing other APIs in + * combination with this comparator. + * + * The performance of Comparators implemented in Java is always + * less than their C++ counterparts due to the bridging overhead, + * as such you likely don't want to use this apart from benchmarking + * or testing. 
+ */
+public final class IntComparator extends AbstractComparator {
+
+  public IntComparator(final ComparatorOptions copt) {
+    super(copt);
+  }
+
+  @Override
+  public String name() {
+    return "rocksdb.java.IntComparator";
+  }
+
+  @Override
+  public int compare(final ByteBuffer a, final ByteBuffer b) {
+    return compareIntKeys(a, b);
+  }
+
+  /**
+   * Compares integer keys so that they are ordered ascending.
+   *
+   * @param a 4 bytes representing an integer key
+   * @param b 4 bytes representing an integer key
+   *
+   * @return negative if {@code a < b}, 0 if {@code a == b}, positive otherwise
+   */
+  private int compareIntKeys(final ByteBuffer a, final ByteBuffer b) {
+    final int iA = a.getInt();
+    final int iB = b.getInt();
+
+    // clamp the (long) difference into int range; computing iA - iB in
+    // 32-bit arithmetic could overflow and flip the sign of the result
+    final long diff = (long) iA - iB;
+    final int result;
+    if (diff < Integer.MIN_VALUE) {
+      result = Integer.MIN_VALUE;
+    } else if (diff > Integer.MAX_VALUE) {
+      result = Integer.MAX_VALUE;
+    } else {
+      result = (int) diff;
+    }
+    return result;
+  }
+}
diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
new file mode 100644
index 000000000..4c06f80aa
--- /dev/null
+++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java
@@ -0,0 +1,88 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.rocksdb.AbstractComparator;
+import org.rocksdb.BuiltinComparator;
+import org.rocksdb.ComparatorOptions;
+import org.rocksdb.Slice;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This is a Java Native implementation of the C++
+ * equivalent ReverseBytewiseComparatorImpl using {@link Slice}.
+ *
+ * The performance of Comparators implemented in Java is always
+ * lower than that of their C++ counterparts due to the bridging
+ * overhead; as such, you likely don't want to use this apart from
+ * benchmarking, and you most likely want
+ * {@link BuiltinComparator#REVERSE_BYTEWISE_COMPARATOR} instead.
+ */
+public final class ReverseBytewiseComparator extends AbstractComparator {
+
+  public ReverseBytewiseComparator(final ComparatorOptions copt) {
+    super(copt);
+  }
+
+  @Override
+  public String name() {
+    return "rocksdb.java.ReverseBytewiseComparator";
+  }
+
+  @Override
+  public int compare(final ByteBuffer a, final ByteBuffer b) {
+    // reverse order is simply the negation of the forward bytewise order
+    return -BytewiseComparator._compare(a, b);
+  }
+
+  @Override
+  public void findShortestSeparator(final ByteBuffer start,
+      final ByteBuffer limit) {
+    // Find length of common prefix
+    final int minLength = Math.min(start.remaining(), limit.remaining());
+    int diffIndex = 0;
+    while (diffIndex < minLength &&
+        start.get(diffIndex) == limit.get(diffIndex)) {
+      diffIndex++;
+    }
+
+    assert(diffIndex <= minLength);
+    if (diffIndex == minLength) {
+      // Do not shorten if one string is a prefix of the other
+      //
+      // We could handle cases like:
+      //     V
+      // A A 2 X Y
+      // A A 2
+      // in a similar way as BytewiseComparator::FindShortestSeparator().
+      // We keep it simple by not implementing it. We can come back to it
+      // later when needed.
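+      // Concretely: with start == "AA2XY" and limit == "AA2" (as drawn
+      // above), limit is a prefix of start, so start is left unshortened.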
+ } else { + final int startByte = start.get(diffIndex) & 0xff; + final int limitByte = limit.get(diffIndex) & 0xff; + if (startByte > limitByte && diffIndex < start.remaining() - 1) { + // Case like + // V + // A A 3 A A + // A A 1 B B + // + // or + // v + // A A 2 A A + // A A 1 B B + // In this case "AA2" will be good. +//#ifndef NDEBUG +// std::string old_start = *start; +//#endif + start.limit(diffIndex + 1); +//#ifndef NDEBUG +// assert(old_start >= *start); +//#endif + assert(BytewiseComparator._compare(start.duplicate(), limit.duplicate()) > 0); + } + } + } +} diff --git a/src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java b/src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java new file mode 100644 index 000000000..0f717e8d4 --- /dev/null +++ b/src/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java @@ -0,0 +1,16 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +public class SizeUnit { + public static final long KB = 1024L; + public static final long MB = KB * KB; + public static final long GB = KB * MB; + public static final long TB = KB * GB; + public static final long PB = KB * TB; + + private SizeUnit() {} +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java new file mode 100644 index 000000000..46685f9fd --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/AbstractTransactionTest.java @@ -0,0 +1,965 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +/** + * Base class of {@link TransactionTest} and {@link OptimisticTransactionTest} + */ +public abstract class AbstractTransactionTest { + + protected final static byte[] TXN_TEST_COLUMN_FAMILY = "txn_test_cf" + .getBytes(); + + protected static final Random rand = PlatformRandomHelper. 
+ getPlatformSpecificRandomFactory(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + public abstract DBContainer startDb() + throws RocksDBException; + + @Test + public void setSnapshot() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshot(); + } + } + + @Test + public void setSnapshotOnNextOperation() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshotOnNextOperation(); + txn.put("key1".getBytes(), "value1".getBytes()); + } + } + + @Test + public void setSnapshotOnNextOperation_transactionNotifier() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + + try(final TestTransactionNotifier notifier = new TestTransactionNotifier()) { + txn.setSnapshotOnNextOperation(notifier); + txn.put("key1".getBytes(), "value1".getBytes()); + + txn.setSnapshotOnNextOperation(notifier); + txn.put("key2".getBytes(), "value2".getBytes()); + + assertThat(notifier.getCreatedSnapshots().size()).isEqualTo(2); + } + } + } + + @Test + public void getSnapshot() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshot(); + final Snapshot snapshot = txn.getSnapshot(); + assertThat(snapshot.isOwningHandle()).isFalse(); + } + } + + @Test + public void getSnapshot_null() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final Snapshot snapshot = txn.getSnapshot(); + assertThat(snapshot).isNull(); + } + } + + @Test + public void clearSnapshot() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setSnapshot(); + txn.clearSnapshot(); + } + } + + @Test + public void clearSnapshot_none() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.clearSnapshot(); + } + } + + @Test + public void commit() throws RocksDBException { + final byte k1[] = "rollback-key1".getBytes(UTF_8); + final byte v1[] = "rollback-value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb()) { + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.commit(); + } + + try(final ReadOptions readOptions = new ReadOptions(); + final Transaction txn2 = dbContainer.beginTransaction()) { + assertThat(txn2.get(readOptions, k1)).isEqualTo(v1); + } + } + } + + @Test + public void rollback() throws RocksDBException { + final byte k1[] = "rollback-key1".getBytes(UTF_8); + final byte v1[] = "rollback-value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb()) { + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.rollback(); + } + + try(final ReadOptions readOptions = new ReadOptions(); + final Transaction txn2 = dbContainer.beginTransaction()) { + assertThat(txn2.get(readOptions, k1)).isNull(); + } + } + } + + @Test + public void savePoint() throws RocksDBException { + final byte k1[] = "savePoint-key1".getBytes(UTF_8); + final byte v1[] = "savePoint-value1".getBytes(UTF_8); + final byte k2[] = "savePoint-key2".getBytes(UTF_8); + final byte v2[] = "savePoint-value2".getBytes(UTF_8); + + try(final DBContainer 
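+        // exercises setSavePoint()/rollbackToSavePoint(): writes made after
+        // the save point are undone, writes made before it are kept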
dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + + txn.setSavePoint(); + + txn.put(k2, v2); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.get(readOptions, k2)).isEqualTo(v2); + + txn.rollbackToSavePoint(); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.get(readOptions, k2)).isNull(); + + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + assertThat(txn2.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn2.get(readOptions, k2)).isNull(); + } + } + } + + @Test + public void getPut_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + } + } + + @Test + public void getPut() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isNull(); + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + } + + @Test + public void multiGetPut_cf() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(new byte[][] { null, null }); + + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values); + } + } + + @Test + public void multiGetPutAsList_cf() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGetAsList(readOptions, cfList, Arrays.asList(keys))) + .containsExactly(null, null); + + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGetAsList(readOptions, cfList, Arrays.asList(keys))) + .containsExactly(values); + } + } + + @Test + public void multiGetPut() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), 
"value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(new byte[][] { null, null }); + + txn.put(keys[0], values[0]); + txn.put(keys[1], values[1]); + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values); + } + } + + @Test + public void multiGetPutAsList() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.multiGetAsList(readOptions, Arrays.asList(keys))).containsExactly(null, null); + + txn.put(keys[0], values[0]); + txn.put(keys[1], values[1]); + assertThat(txn.multiGetAsList(readOptions, Arrays.asList(keys))).containsExactly(values); + } + } + + @Test + public void getForUpdate_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isNull(); + txn.put(testCf, k1, v1); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + } + } + + @Test + public void getForUpdate() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getForUpdate(readOptions, k1, true)).isNull(); + txn.put(k1, v1); + assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + } + } + + @Test + public void multiGetForUpdate_cf() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGetForUpdate(readOptions, cfList, keys)) + .isEqualTo(new byte[][] { null, null }); + + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGetForUpdate(readOptions, cfList, keys)) + .isEqualTo(values); + } + } + + @Test + public void multiGetForUpdate() throws RocksDBException { + final byte keys[][] = new byte[][]{ + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][]{ + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.multiGetForUpdate(readOptions, keys)).isEqualTo(new byte[][]{null, null}); + + txn.put(keys[0], values[0]); + txn.put(keys[1], values[1]); 
+ assertThat(txn.multiGetForUpdate(readOptions, keys)).isEqualTo(values); + } + } + + @Test + public void getIterator() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + txn.put(k1, v1); + + try(final RocksIterator iterator = txn.getIterator(readOptions)) { + iterator.seek(k1); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo(k1); + assertThat(iterator.value()).isEqualTo(v1); + } + } + } + + @Test + public void getIterator_cf() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + txn.put(testCf, k1, v1); + + try(final RocksIterator iterator = txn.getIterator(readOptions, testCf)) { + iterator.seek(k1); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo(k1); + assertThat(iterator.value()).isEqualTo(v1); + } + } + } + + @Test + public void merge_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.merge(testCf, k1, v1); + } + } + + @Test + public void merge() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.merge(k1, v1); + } + } + + + @Test + public void delete_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + + txn.delete(testCf, k1); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + } + } + + @Test + public void delete() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + + txn.delete(k1); + assertThat(txn.get(readOptions, k1)).isNull(); + } + } + + @Test + public void delete_parts_cf() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = 
dbContainer.getTestColumnFamily(); + txn.put(testCf, keyParts, valueParts); + assertThat(txn.get(testCf, readOptions, key)).isEqualTo(value); + + txn.delete(testCf, keyParts); + + assertThat(txn.get(testCf, readOptions, key)) + .isNull(); + } + } + + @Test + public void delete_parts() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(keyParts, valueParts); + + assertThat(txn.get(readOptions, key)).isEqualTo(value); + + txn.delete(keyParts); + + assertThat(txn.get(readOptions, key)).isNull(); + } + } + + @Test + public void getPutUntracked_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + txn.putUntracked(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + } + } + + @Test + public void getPutUntracked() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isNull(); + txn.putUntracked(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + } + + @Deprecated + @Test + public void multiGetPutUntracked_cf() throws RocksDBException { + final byte keys[][] = new byte[][] { + "key1".getBytes(UTF_8), + "key2".getBytes(UTF_8)}; + final byte values[][] = new byte[][] { + "value1".getBytes(UTF_8), + "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(new byte[][] { null, null }); + txn.putUntracked(testCf, keys[0], values[0]); + txn.putUntracked(testCf, keys[1], values[1]); + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values); + } + } + + @Test + public void multiGetPutUntrackedAsList_cf() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + final List cfList = Arrays.asList(testCf, testCf); + + assertThat(txn.multiGetAsList(readOptions, cfList, Arrays.asList(keys))) + .containsExactly(null, null); + txn.putUntracked(testCf, keys[0], values[0]); + txn.putUntracked(testCf, keys[1], values[1]); + 
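+      // putUntracked() writes through the transaction without registering
+      // the keys for conflict detection; cheaper, but conflicts on these
+      // keys will not be caught at commit time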
assertThat(txn.multiGetAsList(readOptions, cfList, Arrays.asList(keys))) + .containsExactly(values); + } + } + + @Deprecated + @Test + public void multiGetPutUntracked() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(new byte[][] { null, null }); + txn.putUntracked(keys[0], values[0]); + txn.putUntracked(keys[1], values[1]); + assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values); + } + } + + @Test + public void multiGetPutAsListUntracked() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.multiGetAsList(readOptions, Arrays.asList(keys))).containsExactly(null, null); + txn.putUntracked(keys[0], values[0]); + txn.putUntracked(keys[1], values[1]); + assertThat(txn.multiGetAsList(readOptions, Arrays.asList(keys))).containsExactly(values); + } + } + + @Test + public void mergeUntracked_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.mergeUntracked(testCf, k1, v1); + } + } + + @Test + public void mergeUntracked() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.mergeUntracked(k1, v1); + } + } + + @Test + public void deleteUntracked_cf() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + + txn.deleteUntracked(testCf, k1); + assertThat(txn.get(testCf, readOptions, k1)).isNull(); + } + } + + @Test + public void deleteUntracked() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + + txn.deleteUntracked(k1); + assertThat(txn.get(readOptions, k1)).isNull(); + } + } + + @Test + public void deleteUntracked_parts_cf() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final 
byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(testCf, keyParts, valueParts); + assertThat(txn.get(testCf, readOptions, key)).isEqualTo(value); + + txn.deleteUntracked(testCf, keyParts); + assertThat(txn.get(testCf, readOptions, key)).isNull(); + } + } + + @Test + public void deleteUntracked_parts() throws RocksDBException { + final byte keyParts[][] = new byte[][] { + "ke".getBytes(UTF_8), + "y1".getBytes(UTF_8)}; + final byte valueParts[][] = new byte[][] { + "val".getBytes(UTF_8), + "ue1".getBytes(UTF_8)}; + final byte[] key = concat(keyParts); + final byte[] value = concat(valueParts); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.put(keyParts, valueParts); + assertThat(txn.get(readOptions, key)).isEqualTo(value); + + txn.deleteUntracked(keyParts); + assertThat(txn.get(readOptions, key)).isNull(); + } + } + + @Test + public void putLogData() throws RocksDBException { + final byte[] blob = "blobby".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.putLogData(blob); + } + } + + @Test + public void enabledDisableIndexing() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.disableIndexing(); + txn.enableIndexing(); + txn.disableIndexing(); + txn.enableIndexing(); + } + } + + @Test + public void numKeys() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte k2[] = "key2".getBytes(UTF_8); + final byte v2[] = "value2".getBytes(UTF_8); + final byte k3[] = "key3".getBytes(UTF_8); + final byte v3[] = "value3".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + txn.put(k1, v1); + txn.put(testCf, k2, v2); + txn.merge(k3, v3); + txn.delete(testCf, k2); + + assertThat(txn.getNumKeys()).isEqualTo(3); + assertThat(txn.getNumPuts()).isEqualTo(2); + assertThat(txn.getNumMerges()).isEqualTo(1); + assertThat(txn.getNumDeletes()).isEqualTo(1); + } + } + + @Test + public void elapsedTime() throws RocksDBException, InterruptedException { + final long preStartTxnTime = System.currentTimeMillis(); + try (final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + Thread.sleep(2); + + final long txnElapsedTime = txn.getElapsedTime(); + assertThat(txnElapsedTime).isLessThan(System.currentTimeMillis() - preStartTxnTime); + assertThat(txnElapsedTime).isGreaterThan(0); + } + } + + @Test + public void getWriteBatch() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(k1, v1); + + final WriteBatchWithIndex writeBatch = txn.getWriteBatch(); + assertThat(writeBatch).isNotNull(); + assertThat(writeBatch.isOwningHandle()).isFalse(); + assertThat(writeBatch.count()).isEqualTo(1); + } + } + + @Test + public void setLockTimeout() throws RocksDBException { 
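+      // the lock timeout is given in milliseconds and overrides, for this
+      // transaction, the default configured when the database was opened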
+ try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + txn.setLockTimeout(1000); + } + } + + @Test + public void writeOptions() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final WriteOptions writeOptions = new WriteOptions() + .setDisableWAL(true) + .setSync(true); + final Transaction txn = dbContainer.beginTransaction(writeOptions)) { + + txn.put(k1, v1); + + WriteOptions txnWriteOptions = txn.getWriteOptions(); + assertThat(txnWriteOptions).isNotNull(); + assertThat(txnWriteOptions.isOwningHandle()).isFalse(); + assertThat(txnWriteOptions).isNotSameAs(writeOptions); + assertThat(txnWriteOptions.disableWAL()).isTrue(); + assertThat(txnWriteOptions.sync()).isTrue(); + + txn.setWriteOptions(txnWriteOptions.setSync(false)); + txnWriteOptions = txn.getWriteOptions(); + assertThat(txnWriteOptions).isNotNull(); + assertThat(txnWriteOptions.isOwningHandle()).isFalse(); + assertThat(txnWriteOptions).isNotSameAs(writeOptions); + assertThat(txnWriteOptions.disableWAL()).isTrue(); + assertThat(txnWriteOptions.sync()).isFalse(); + } + } + + @Test + public void undoGetForUpdate_cf() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isNull(); + txn.put(testCf, k1, v1); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + txn.undoGetForUpdate(testCf, k1); + } + } + + @Test + public void undoGetForUpdate() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getForUpdate(readOptions, k1, true)).isNull(); + txn.put(k1, v1); + assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + txn.undoGetForUpdate(k1); + } + } + + @Test + public void rebuildFromWriteBatch() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + final byte k2[] = "key2".getBytes(UTF_8); + final byte v2[] = "value2".getBytes(UTF_8); + final byte k3[] = "key3".getBytes(UTF_8); + final byte v3[] = "value3".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(k1, v1); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.getNumKeys()).isEqualTo(1); + + try(final WriteBatch writeBatch = new WriteBatch()) { + writeBatch.put(k2, v2); + writeBatch.put(k3, v3); + txn.rebuildFromWriteBatch(writeBatch); + + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + assertThat(txn.get(readOptions, k2)).isEqualTo(v2); + assertThat(txn.get(readOptions, k3)).isEqualTo(v3); + assertThat(txn.getNumKeys()).isEqualTo(3); + } + } + } + + @Test + public void getCommitTimeWriteBatch() throws RocksDBException { + final byte k1[] = "key1".getBytes(UTF_8); + final byte v1[] = "value1".getBytes(UTF_8); + + try(final DBContainer dbContainer = 
startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + + txn.put(k1, v1); + final WriteBatch writeBatch = txn.getCommitTimeWriteBatch(); + + assertThat(writeBatch).isNotNull(); + assertThat(writeBatch.isOwningHandle()).isFalse(); + assertThat(writeBatch.count()).isEqualTo(0); + } + } + + @Test + public void logNumber() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getLogNumber()).isEqualTo(0); + final long logNumber = rand.nextLong(); + txn.setLogNumber(logNumber); + assertThat(txn.getLogNumber()).isEqualTo(logNumber); + } + } + + private static byte[] concat(final byte[][] bufs) { + int resultLength = 0; + for(final byte[] buf : bufs) { + resultLength += buf.length; + } + + final byte[] result = new byte[resultLength]; + int resultOffset = 0; + for(final byte[] buf : bufs) { + final int srcLength = buf.length; + System.arraycopy(buf, 0, result, resultOffset, srcLength); + resultOffset += srcLength; + } + + return result; + } + + private static class TestTransactionNotifier + extends AbstractTransactionNotifier { + private final List createdSnapshots = new ArrayList<>(); + + @Override + public void snapshotCreated(final Snapshot newSnapshot) { + createdSnapshots.add(newSnapshot); + } + + public List getCreatedSnapshots() { + return createdSnapshots; + } + } + + protected static abstract class DBContainer + implements AutoCloseable { + protected final WriteOptions writeOptions; + protected final List columnFamilyHandles; + protected final ColumnFamilyOptions columnFamilyOptions; + protected final DBOptions options; + + public DBContainer(final WriteOptions writeOptions, + final List columnFamilyHandles, + final ColumnFamilyOptions columnFamilyOptions, + final DBOptions options) { + this.writeOptions = writeOptions; + this.columnFamilyHandles = columnFamilyHandles; + this.columnFamilyOptions = columnFamilyOptions; + this.options = options; + } + + public abstract Transaction beginTransaction(); + + public abstract Transaction beginTransaction( + final WriteOptions writeOptions); + + public ColumnFamilyHandle getTestColumnFamily() { + return columnFamilyHandles.get(1); + } + + @Override + public abstract void close(); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java new file mode 100644 index 000000000..794bf04fb --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineOptionsTest.java @@ -0,0 +1,300 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Random; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class BackupEngineOptionsTest { + private final static String ARBITRARY_PATH = + System.getProperty("java.io.tmpdir"); + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + public static final Random rand = PlatformRandomHelper. 
+ getPlatformSpecificRandomFactory(); + + @Test + public void backupDir() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + assertThat(backupEngineOptions.backupDir()).isEqualTo(ARBITRARY_PATH); + } + } + + @Test + public void env() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + assertThat(backupEngineOptions.backupEnv()).isNull(); + + try(final Env env = new RocksMemEnv(Env.getDefault())) { + backupEngineOptions.setBackupEnv(env); + assertThat(backupEngineOptions.backupEnv()).isEqualTo(env); + } + } + } + + @Test + public void shareTableFiles() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + final boolean value = rand.nextBoolean(); + backupEngineOptions.setShareTableFiles(value); + assertThat(backupEngineOptions.shareTableFiles()).isEqualTo(value); + } + } + + @Test + public void infoLog() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + assertThat(backupEngineOptions.infoLog()).isNull(); + + try(final Options options = new Options(); + final Logger logger = new Logger(options){ + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + + } + }) { + backupEngineOptions.setInfoLog(logger); + assertThat(backupEngineOptions.infoLog()).isEqualTo(logger); + } + } + } + + @Test + public void sync() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + final boolean value = rand.nextBoolean(); + backupEngineOptions.setSync(value); + assertThat(backupEngineOptions.sync()).isEqualTo(value); + } + } + + @Test + public void destroyOldData() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH);) { + final boolean value = rand.nextBoolean(); + backupEngineOptions.setDestroyOldData(value); + assertThat(backupEngineOptions.destroyOldData()).isEqualTo(value); + } + } + + @Test + public void backupLogFiles() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + final boolean value = rand.nextBoolean(); + backupEngineOptions.setBackupLogFiles(value); + assertThat(backupEngineOptions.backupLogFiles()).isEqualTo(value); + } + } + + @Test + public void backupRateLimit() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + final long value = Math.abs(rand.nextLong()); + backupEngineOptions.setBackupRateLimit(value); + assertThat(backupEngineOptions.backupRateLimit()).isEqualTo(value); + // negative will be mapped to 0 + backupEngineOptions.setBackupRateLimit(-1); + assertThat(backupEngineOptions.backupRateLimit()).isEqualTo(0); + } + } + + @Test + public void backupRateLimiter() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + assertThat(backupEngineOptions.backupEnv()).isNull(); + + try(final RateLimiter backupRateLimiter = + new RateLimiter(999)) { + backupEngineOptions.setBackupRateLimiter(backupRateLimiter); + assertThat(backupEngineOptions.backupRateLimiter()).isEqualTo(backupRateLimiter); + } + } + } + + @Test + public void restoreRateLimit() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + final long value = Math.abs(rand.nextLong()); + backupEngineOptions.setRestoreRateLimit(value); + assertThat(backupEngineOptions.restoreRateLimit()).isEqualTo(value); + // negative will be mapped 
to 0 + backupEngineOptions.setRestoreRateLimit(-1); + assertThat(backupEngineOptions.restoreRateLimit()).isEqualTo(0); + } + } + + @Test + public void restoreRateLimiter() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + assertThat(backupEngineOptions.backupEnv()).isNull(); + + try(final RateLimiter restoreRateLimiter = + new RateLimiter(911)) { + backupEngineOptions.setRestoreRateLimiter(restoreRateLimiter); + assertThat(backupEngineOptions.restoreRateLimiter()).isEqualTo(restoreRateLimiter); + } + } + } + + @Test + public void shareFilesWithChecksum() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + boolean value = rand.nextBoolean(); + backupEngineOptions.setShareFilesWithChecksum(value); + assertThat(backupEngineOptions.shareFilesWithChecksum()).isEqualTo(value); + } + } + + @Test + public void maxBackgroundOperations() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + final int value = rand.nextInt(); + backupEngineOptions.setMaxBackgroundOperations(value); + assertThat(backupEngineOptions.maxBackgroundOperations()).isEqualTo(value); + } + } + + @Test + public void callbackTriggerIntervalSize() { + try (final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH)) { + final long value = rand.nextLong(); + backupEngineOptions.setCallbackTriggerIntervalSize(value); + assertThat(backupEngineOptions.callbackTriggerIntervalSize()).isEqualTo(value); + } + } + + @Test + public void failBackupDirIsNull() { + exception.expect(IllegalArgumentException.class); + try (final BackupEngineOptions opts = new BackupEngineOptions(null)) { + //no-op + } + } + + @Test + public void failBackupDirIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.backupDir(); + } + } + + @Test + public void failSetShareTableFilesIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.setShareTableFiles(true); + } + } + + @Test + public void failShareTableFilesIfDisposed() { + try (BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.shareTableFiles(); + } + } + + @Test + public void failSetSyncIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.setSync(true); + } + } + + @Test + public void failSyncIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.sync(); + } + } + + @Test + public void failSetDestroyOldDataIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.setDestroyOldData(true); + } + } + + @Test + public void failDestroyOldDataIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.destroyOldData(); + } + } + + @Test + public void failSetBackupLogFilesIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.setBackupLogFiles(true); + } + } + + @Test + public void failBackupLogFilesIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.backupLogFiles(); + } + } + + @Test + public void failSetBackupRateLimitIfDisposed() { + try (final BackupEngineOptions options = 
setupUninitializedBackupEngineOptions(exception)) { + options.setBackupRateLimit(1); + } + } + + @Test + public void failBackupRateLimitIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.backupRateLimit(); + } + } + + @Test + public void failSetRestoreRateLimitIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.setRestoreRateLimit(1); + } + } + + @Test + public void failRestoreRateLimitIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.restoreRateLimit(); + } + } + + @Test + public void failSetShareFilesWithChecksumIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.setShareFilesWithChecksum(true); + } + } + + @Test + public void failShareFilesWithChecksumIfDisposed() { + try (final BackupEngineOptions options = setupUninitializedBackupEngineOptions(exception)) { + options.shareFilesWithChecksum(); + } + } + + private BackupEngineOptions setupUninitializedBackupEngineOptions(ExpectedException exception) { + final BackupEngineOptions backupEngineOptions = new BackupEngineOptions(ARBITRARY_PATH); + backupEngineOptions.close(); + exception.expect(AssertionError.class); + return backupEngineOptions; + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java new file mode 100644 index 000000000..67145f846 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java @@ -0,0 +1,261 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; + +import static org.assertj.core.api.Assertions.assertThat; + +public class BackupEngineTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Rule + public TemporaryFolder backupFolder = new TemporaryFolder(); + + @Test + public void backupDb() throws RocksDBException { + // Open empty database. + try(final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + // Fill database with some test values + prepareDatabase(db); + + // Create two backups + try (final BackupEngineOptions bopt = + new BackupEngineOptions(backupFolder.getRoot().getAbsolutePath()); + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + be.createNewBackup(db, false); + be.createNewBackup(db, true); + verifyNumberOfValidBackups(be, 2); + } + } + } + + @Test + public void deleteBackup() throws RocksDBException { + // Open empty database. 
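+    // (as in backupDb above, the second argument to createNewBackup()
+    // controls whether the memtable is flushed before the backup is taken)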
+ try(final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(db); + // Create two backups + try (final BackupEngineOptions bopt = + new BackupEngineOptions(backupFolder.getRoot().getAbsolutePath()); + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + be.createNewBackup(db, false); + be.createNewBackup(db, true); + final List backupInfo = + verifyNumberOfValidBackups(be, 2); + // Delete the first backup + be.deleteBackup(backupInfo.get(0).backupId()); + final List newBackupInfo = + verifyNumberOfValidBackups(be, 1); + + // The second backup must remain. + assertThat(newBackupInfo.get(0).backupId()). + isEqualTo(backupInfo.get(1).backupId()); + } + } + } + + @Test + public void purgeOldBackups() throws RocksDBException { + // Open empty database. + try(final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // Fill database with some test values + prepareDatabase(db); + // Create four backups + try (final BackupEngineOptions bopt = + new BackupEngineOptions(backupFolder.getRoot().getAbsolutePath()); + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + be.createNewBackup(db, false); + be.createNewBackup(db, true); + be.createNewBackup(db, true); + be.createNewBackup(db, true); + final List backupInfo = + verifyNumberOfValidBackups(be, 4); + // Delete everything except the latest backup + be.purgeOldBackups(1); + final List newBackupInfo = + verifyNumberOfValidBackups(be, 1); + // The latest backup must remain. + assertThat(newBackupInfo.get(0).backupId()). + isEqualTo(backupInfo.get(3).backupId()); + } + } + } + + @Test + public void restoreLatestBackup() throws RocksDBException { + try(final Options opt = new Options().setCreateIfMissing(true)) { + // Open empty database. + RocksDB db = null; + try { + db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()); + // Fill database with some test values + prepareDatabase(db); + + try (final BackupEngineOptions bopt = + new BackupEngineOptions(backupFolder.getRoot().getAbsolutePath()); + final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) { + be.createNewBackup(db, true); + verifyNumberOfValidBackups(be, 1); + db.put("key1".getBytes(), "valueV2".getBytes()); + db.put("key2".getBytes(), "valueV2".getBytes()); + be.createNewBackup(db, true); + verifyNumberOfValidBackups(be, 2); + db.put("key1".getBytes(), "valueV3".getBytes()); + db.put("key2".getBytes(), "valueV3".getBytes()); + assertThat(new String(db.get("key1".getBytes()))).endsWith("V3"); + assertThat(new String(db.get("key2".getBytes()))).endsWith("V3"); + + db.close(); + db = null; + + verifyNumberOfValidBackups(be, 2); + // restore db from latest backup + try(final RestoreOptions ropts = new RestoreOptions(false)) { + be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(), + dbFolder.getRoot().getAbsolutePath(), ropts); + } + + // Open database again. + db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath()); + + // Values must have suffix V2 because of restoring latest backup. 
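+          // (RestoreOptions(false) means keepLogFiles == false, so the
+          // restored state is exactly the chosen backup, with no extra
+          // WAL replay)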
+          assertThat(new String(db.get("key1".getBytes()))).endsWith("V2");
+          assertThat(new String(db.get("key2".getBytes()))).endsWith("V2");
+        }
+      } finally {
+        if (db != null) {
+          db.close();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void restoreFromBackup()
+      throws RocksDBException {
+    try (final Options opt = new Options().setCreateIfMissing(true)) {
+      RocksDB db = null;
+      try {
+        // Open empty database.
+        db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath());
+        // Fill database with some test values
+        prepareDatabase(db);
+        try (final BackupEngineOptions bopt =
+                 new BackupEngineOptions(backupFolder.getRoot().getAbsolutePath());
+             final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+          be.createNewBackup(db, true);
+          verifyNumberOfValidBackups(be, 1);
+          db.put("key1".getBytes(), "valueV2".getBytes());
+          db.put("key2".getBytes(), "valueV2".getBytes());
+          be.createNewBackup(db, true);
+          verifyNumberOfValidBackups(be, 2);
+          db.put("key1".getBytes(), "valueV3".getBytes());
+          db.put("key2".getBytes(), "valueV3".getBytes());
+          assertThat(new String(db.get("key1".getBytes()))).endsWith("V3");
+          assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
+
+          // close the database
+          db.close();
+          db = null;
+
+          // restore the backup
+          final List<BackupInfo> backupInfo = verifyNumberOfValidBackups(be, 2);
+          // restore db from first backup
+          be.restoreDbFromBackup(backupInfo.get(0).backupId(),
+              dbFolder.getRoot().getAbsolutePath(),
+              dbFolder.getRoot().getAbsolutePath(),
+              new RestoreOptions(false));
+          // Open database again.
+          db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath());
+          // Values must have suffix V1 because the first backup was restored.
+          assertThat(new String(db.get("key1".getBytes()))).endsWith("V1");
+          assertThat(new String(db.get("key2".getBytes()))).endsWith("V1");
+        }
+      } finally {
+        if (db != null) {
+          db.close();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void backupDbWithMetadata() throws RocksDBException {
+    // Open empty database.
+    try (final Options opt = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      // Fill database with some test values
+      prepareDatabase(db);
+
+      // Create a single backup carrying application metadata
+      try (final BackupEngineOptions bopt =
+               new BackupEngineOptions(backupFolder.getRoot().getAbsolutePath());
+           final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+        final String metadata = String.valueOf(ThreadLocalRandom.current().nextInt());
+        be.createNewBackupWithMetadata(db, metadata, true);
+        final List<BackupInfo> backupInfoList = verifyNumberOfValidBackups(be, 1);
+        assertThat(backupInfoList.get(0).appMetadata()).isEqualTo(metadata);
+      }
+    }
+  }
+
+  /**
+   * Verify backups.
+   *
+   * @param be {@link BackupEngine} instance.
+   * @param expectedNumberOfBackups the expected number of valid backups
+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.
+   */
+  private List<BackupInfo> verifyNumberOfValidBackups(final BackupEngine be,
+      final int expectedNumberOfBackups) throws RocksDBException {
+    // Verify that backups exist
+    assertThat(be.getCorruptedBackups().length).
+        isEqualTo(0);
+    be.garbageCollect();
+    final List<BackupInfo> backupInfo = be.getBackupInfo();
+    assertThat(backupInfo.size()).
+        isEqualTo(expectedNumberOfBackups);
+    return backupInfo;
+  }
+
+  /**
+   * Fill database with some test values.
+   *
+   * @param db {@link RocksDB} instance.
+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.
+ */ + private void prepareDatabase(final RocksDB db) + throws RocksDBException { + db.put("key1".getBytes(), "valueV1".getBytes()); + db.put("key2".getBytes(), "valueV1".getBytes()); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BlobOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BlobOptionsTest.java new file mode 100644 index 000000000..fe3d9b246 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/BlobOptionsTest.java @@ -0,0 +1,351 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.File; +import java.io.FilenameFilter; +import java.util.*; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class BlobOptionsTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + final int minBlobSize = 65536; + final int largeBlobSize = 65536 * 2; + + /** + * Count the files in the temporary folder which end with a particular suffix + * Used to query the state of a test database to check if it is as the test expects + * + * @param endsWith the suffix to match + * @return the number of files with a matching suffix + */ + @SuppressWarnings("CallToStringConcatCanBeReplacedByOperator") + private int countDBFiles(final String endsWith) { + return Objects + .requireNonNull(dbFolder.getRoot().list(new FilenameFilter() { + @Override + public boolean accept(File dir, String name) { + return name.endsWith(endsWith); + } + })) + .length; + } + + @SuppressWarnings("SameParameterValue") + private byte[] small_key(String suffix) { + return ("small_key_" + suffix).getBytes(UTF_8); + } + + @SuppressWarnings("SameParameterValue") + private byte[] small_value(String suffix) { + return ("small_value_" + suffix).getBytes(UTF_8); + } + + private byte[] large_key(String suffix) { + return ("large_key_" + suffix).getBytes(UTF_8); + } + + private byte[] large_value(String repeat) { + final byte[] large_value = ("" + repeat + "_" + largeBlobSize + "b").getBytes(UTF_8); + final byte[] large_buffer = new byte[largeBlobSize]; + for (int pos = 0; pos < largeBlobSize; pos += large_value.length) { + int numBytes = Math.min(large_value.length, large_buffer.length - pos); + System.arraycopy(large_value, 0, large_buffer, pos, numBytes); + } + return large_buffer; + } + + @Test + public void blobOptions() { + try (final Options options = new Options()) { + assertThat(options.enableBlobFiles()).isEqualTo(false); + assertThat(options.minBlobSize()).isEqualTo(0); + assertThat(options.blobCompressionType()).isEqualTo(CompressionType.NO_COMPRESSION); + assertThat(options.enableBlobGarbageCollection()).isEqualTo(false); + assertThat(options.blobFileSize()).isEqualTo(268435456L); + assertThat(options.blobGarbageCollectionAgeCutoff()).isEqualTo(0.25); + assertThat(options.blobGarbageCollectionForceThreshold()).isEqualTo(1.0); + assertThat(options.blobCompactionReadaheadSize()).isEqualTo(0); + assertThat(options.prepopulateBlobCache()) + .isEqualTo(PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE); + + 
assertThat(options.setEnableBlobFiles(true)).isEqualTo(options); + assertThat(options.setMinBlobSize(132768L)).isEqualTo(options); + assertThat(options.setBlobCompressionType(CompressionType.BZLIB2_COMPRESSION)) + .isEqualTo(options); + assertThat(options.setEnableBlobGarbageCollection(true)).isEqualTo(options); + assertThat(options.setBlobFileSize(132768L)).isEqualTo(options); + assertThat(options.setBlobGarbageCollectionAgeCutoff(0.89)).isEqualTo(options); + assertThat(options.setBlobGarbageCollectionForceThreshold(0.80)).isEqualTo(options); + assertThat(options.setBlobCompactionReadaheadSize(262144L)).isEqualTo(options); + assertThat(options.setBlobFileStartingLevel(0)).isEqualTo(options); + assertThat(options.setPrepopulateBlobCache(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY)) + .isEqualTo(options); + + assertThat(options.enableBlobFiles()).isEqualTo(true); + assertThat(options.minBlobSize()).isEqualTo(132768L); + assertThat(options.blobCompressionType()).isEqualTo(CompressionType.BZLIB2_COMPRESSION); + assertThat(options.enableBlobGarbageCollection()).isEqualTo(true); + assertThat(options.blobFileSize()).isEqualTo(132768L); + assertThat(options.blobGarbageCollectionAgeCutoff()).isEqualTo(0.89); + assertThat(options.blobGarbageCollectionForceThreshold()).isEqualTo(0.80); + assertThat(options.blobCompactionReadaheadSize()).isEqualTo(262144L); + assertThat(options.blobFileStartingLevel()).isEqualTo(0); + assertThat(options.prepopulateBlobCache()) + .isEqualTo(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY); + } + } + + @Test + public void blobColumnFamilyOptions() { + try (final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions()) { + assertThat(columnFamilyOptions.enableBlobFiles()).isEqualTo(false); + assertThat(columnFamilyOptions.minBlobSize()).isEqualTo(0); + assertThat(columnFamilyOptions.blobCompressionType()) + .isEqualTo(CompressionType.NO_COMPRESSION); + assertThat(columnFamilyOptions.enableBlobGarbageCollection()).isEqualTo(false); + assertThat(columnFamilyOptions.blobFileSize()).isEqualTo(268435456L); + assertThat(columnFamilyOptions.blobGarbageCollectionAgeCutoff()).isEqualTo(0.25); + assertThat(columnFamilyOptions.blobGarbageCollectionForceThreshold()).isEqualTo(1.0); + assertThat(columnFamilyOptions.blobCompactionReadaheadSize()).isEqualTo(0); + + assertThat(columnFamilyOptions.setEnableBlobFiles(true)).isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setMinBlobSize(132768L)).isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setBlobCompressionType(CompressionType.BZLIB2_COMPRESSION)) + .isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setEnableBlobGarbageCollection(true)) + .isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setBlobFileSize(132768L)).isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setBlobGarbageCollectionAgeCutoff(0.89)) + .isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setBlobGarbageCollectionForceThreshold(0.80)) + .isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setBlobCompactionReadaheadSize(262144L)) + .isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setBlobFileStartingLevel(0)).isEqualTo(columnFamilyOptions); + assertThat(columnFamilyOptions.setPrepopulateBlobCache( + PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE)) + .isEqualTo(columnFamilyOptions); + + assertThat(columnFamilyOptions.enableBlobFiles()).isEqualTo(true); + assertThat(columnFamilyOptions.minBlobSize()).isEqualTo(132768L); + 
assertThat(columnFamilyOptions.blobCompressionType()) + .isEqualTo(CompressionType.BZLIB2_COMPRESSION); + assertThat(columnFamilyOptions.enableBlobGarbageCollection()).isEqualTo(true); + assertThat(columnFamilyOptions.blobFileSize()).isEqualTo(132768L); + assertThat(columnFamilyOptions.blobGarbageCollectionAgeCutoff()).isEqualTo(0.89); + assertThat(columnFamilyOptions.blobGarbageCollectionForceThreshold()).isEqualTo(0.80); + assertThat(columnFamilyOptions.blobCompactionReadaheadSize()).isEqualTo(262144L); + assertThat(columnFamilyOptions.blobFileStartingLevel()).isEqualTo(0); + assertThat(columnFamilyOptions.prepopulateBlobCache()) + .isEqualTo(PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE); + } + } + + @Test + public void blobMutableColumnFamilyOptionsBuilder() { + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder = + MutableColumnFamilyOptions.builder(); + builder.setEnableBlobFiles(true) + .setMinBlobSize(1024) + .setBlobFileSize(132768) + .setBlobCompressionType(CompressionType.BZLIB2_COMPRESSION) + .setEnableBlobGarbageCollection(true) + .setBlobGarbageCollectionAgeCutoff(0.89) + .setBlobGarbageCollectionForceThreshold(0.80) + .setBlobCompactionReadaheadSize(262144) + .setBlobFileStartingLevel(1) + .setPrepopulateBlobCache(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY); + + assertThat(builder.enableBlobFiles()).isEqualTo(true); + assertThat(builder.minBlobSize()).isEqualTo(1024); + assertThat(builder.blobFileSize()).isEqualTo(132768); + assertThat(builder.blobCompressionType()).isEqualTo(CompressionType.BZLIB2_COMPRESSION); + assertThat(builder.enableBlobGarbageCollection()).isEqualTo(true); + assertThat(builder.blobGarbageCollectionAgeCutoff()).isEqualTo(0.89); + assertThat(builder.blobGarbageCollectionForceThreshold()).isEqualTo(0.80); + assertThat(builder.blobCompactionReadaheadSize()).isEqualTo(262144); + assertThat(builder.blobFileStartingLevel()).isEqualTo(1); + assertThat(builder.prepopulateBlobCache()) + .isEqualTo(PrepopulateBlobCache.PREPOPULATE_BLOB_FLUSH_ONLY); + + builder.setEnableBlobFiles(false) + .setMinBlobSize(4096) + .setBlobFileSize(2048) + .setBlobCompressionType(CompressionType.LZ4_COMPRESSION) + .setEnableBlobGarbageCollection(false) + .setBlobGarbageCollectionAgeCutoff(0.91) + .setBlobGarbageCollectionForceThreshold(0.96) + .setBlobCompactionReadaheadSize(1024) + .setBlobFileStartingLevel(0) + .setPrepopulateBlobCache(PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE); + + assertThat(builder.enableBlobFiles()).isEqualTo(false); + assertThat(builder.minBlobSize()).isEqualTo(4096); + assertThat(builder.blobFileSize()).isEqualTo(2048); + assertThat(builder.blobCompressionType()).isEqualTo(CompressionType.LZ4_COMPRESSION); + assertThat(builder.enableBlobGarbageCollection()).isEqualTo(false); + assertThat(builder.blobGarbageCollectionAgeCutoff()).isEqualTo(0.91); + assertThat(builder.blobGarbageCollectionForceThreshold()).isEqualTo(0.96); + assertThat(builder.blobCompactionReadaheadSize()).isEqualTo(1024); + assertThat(builder.blobFileStartingLevel()).isEqualTo(0); + assertThat(builder.prepopulateBlobCache()) + .isEqualTo(PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE); + + final MutableColumnFamilyOptions options = builder.build(); + assertThat(options.getKeys()) + .isEqualTo(new String[] {"enable_blob_files", "min_blob_size", "blob_file_size", + "blob_compression_type", "enable_blob_garbage_collection", + "blob_garbage_collection_age_cutoff", "blob_garbage_collection_force_threshold", + "blob_compaction_readahead_size", 
"blob_file_starting_level", + "prepopulate_blob_cache"}); + assertThat(options.getValues()) + .isEqualTo(new String[] {"false", "4096", "2048", "LZ4_COMPRESSION", "false", "0.91", + "0.96", "1024", "0", "PREPOPULATE_BLOB_DISABLE"}); + } + + /** + * Configure the default column family with BLOBs. + * Confirm that BLOBs are generated when appropriately-sized writes are flushed. + * + * @throws RocksDBException if a db access throws an exception + */ + @Test + public void testBlobWriteAboveThreshold() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setMinBlobSize(minBlobSize) + .setEnableBlobFiles(true); + + final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + db.put(small_key("default"), small_value("default")); + db.flush(new FlushOptions().setWaitForFlush(true)); + + // check there are no blobs in the database + assertThat(countDBFiles(".sst")).isEqualTo(1); + assertThat(countDBFiles(".blob")).isEqualTo(0); + + db.put(large_key("default"), large_value("default")); + db.flush(new FlushOptions().setWaitForFlush(true)); + + // wrote and flushed a value larger than the blobbing threshold + // check there is a single blob in the database + assertThat(countDBFiles(".sst")).isEqualTo(2); + assertThat(countDBFiles(".blob")).isEqualTo(1); + + assertThat(db.get(small_key("default"))).isEqualTo(small_value("default")); + assertThat(db.get(large_key("default"))).isEqualTo(large_value("default")); + + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder fetchOptions = + db.getOptions(null); + assertThat(fetchOptions.minBlobSize()).isEqualTo(minBlobSize); + assertThat(fetchOptions.enableBlobFiles()).isEqualTo(true); + assertThat(fetchOptions.writeBufferSize()).isEqualTo(64 << 20); + } + } + + /** + * Configure 2 column families respectively with and without BLOBs. + * Confirm that BLOB files are generated (once the DB is flushed) only for the appropriate column + * family. 
+   *
+   * @throws RocksDBException if a db access throws an exception
+   */
+  @Test
+  public void testBlobWriteAboveThresholdCF() throws RocksDBException {
+    final ColumnFamilyOptions columnFamilyOptions0 = new ColumnFamilyOptions();
+    final ColumnFamilyDescriptor columnFamilyDescriptor0 =
+        new ColumnFamilyDescriptor("default".getBytes(UTF_8), columnFamilyOptions0);
+    List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+        Collections.singletonList(columnFamilyDescriptor0);
+    List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+    try (final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(),
+             columnFamilyDescriptors, columnFamilyHandles)) {
+      db.put(columnFamilyHandles.get(0), small_key("default"), small_value("default"));
+      db.flush(new FlushOptions().setWaitForFlush(true));
+
+      assertThat(countDBFiles(".blob")).isEqualTo(0);
+
+      try (final ColumnFamilyOptions columnFamilyOptions1 =
+               new ColumnFamilyOptions().setMinBlobSize(minBlobSize).setEnableBlobFiles(true);
+
+           final ColumnFamilyOptions columnFamilyOptions2 =
+               new ColumnFamilyOptions().setMinBlobSize(minBlobSize).setEnableBlobFiles(false)) {
+        final ColumnFamilyDescriptor columnFamilyDescriptor1 =
+            new ColumnFamilyDescriptor("column_family_1".getBytes(UTF_8), columnFamilyOptions1);
+        final ColumnFamilyDescriptor columnFamilyDescriptor2 =
+            new ColumnFamilyDescriptor("column_family_2".getBytes(UTF_8), columnFamilyOptions2);
+
+        // Create the first column family with blob options
+        db.createColumnFamily(columnFamilyDescriptor1);
+
+        // Create the second column family without blob options
+        db.createColumnFamily(columnFamilyDescriptor2);
+      }
+    }
+
+    // Now re-open after auto-close - at this point the CF options we use are recognized.
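+    // (RocksDB requires that every existing column family, including
+    // "default", is listed with its options when the database is re-opened;
+    // db.getOptions(handle) below confirms that the per-CF blob settings
+    // took effect.)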
+    try (final ColumnFamilyOptions columnFamilyOptions1 =
+             new ColumnFamilyOptions().setMinBlobSize(minBlobSize).setEnableBlobFiles(true);
+
+         final ColumnFamilyOptions columnFamilyOptions2 =
+             new ColumnFamilyOptions().setMinBlobSize(minBlobSize).setEnableBlobFiles(false)) {
+      assertThat(columnFamilyOptions1.enableBlobFiles()).isEqualTo(true);
+      assertThat(columnFamilyOptions1.minBlobSize()).isEqualTo(minBlobSize);
+      assertThat(columnFamilyOptions2.enableBlobFiles()).isEqualTo(false);
+      assertThat(columnFamilyOptions2.minBlobSize()).isEqualTo(minBlobSize);
+
+      final ColumnFamilyDescriptor columnFamilyDescriptor1 =
+          new ColumnFamilyDescriptor("column_family_1".getBytes(UTF_8), columnFamilyOptions1);
+      final ColumnFamilyDescriptor columnFamilyDescriptor2 =
+          new ColumnFamilyDescriptor("column_family_2".getBytes(UTF_8), columnFamilyOptions2);
+      columnFamilyDescriptors = new ArrayList<>();
+      columnFamilyDescriptors.add(columnFamilyDescriptor0);
+      columnFamilyDescriptors.add(columnFamilyDescriptor1);
+      columnFamilyDescriptors.add(columnFamilyDescriptor2);
+      columnFamilyHandles = new ArrayList<>();
+
+      assertThat(columnFamilyDescriptor1.getOptions().enableBlobFiles()).isEqualTo(true);
+      assertThat(columnFamilyDescriptor2.getOptions().enableBlobFiles()).isEqualTo(false);
+
+      try (final DBOptions dbOptions = new DBOptions();
+           final RocksDB db = RocksDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(),
+               columnFamilyDescriptors, columnFamilyHandles)) {
+        final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder1 =
+            db.getOptions(columnFamilyHandles.get(1));
+        assertThat(builder1.enableBlobFiles()).isEqualTo(true);
+        assertThat(builder1.minBlobSize()).isEqualTo(minBlobSize);
+
+        final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder2 =
+            db.getOptions(columnFamilyHandles.get(2));
+        assertThat(builder2.enableBlobFiles()).isEqualTo(false);
+        assertThat(builder2.minBlobSize()).isEqualTo(minBlobSize);
+
+        db.put(columnFamilyHandles.get(1), large_key("column_family_1_k2"),
+            large_value("column_family_1_k2"));
+        db.flush(new FlushOptions().setWaitForFlush(true), columnFamilyHandles.get(1));
+        // The blob-enabled column family produces a blob file on flush.
+        assertThat(countDBFiles(".blob")).isEqualTo(1);
+
+        db.put(columnFamilyHandles.get(2), large_key("column_family_2_k2"),
+            large_value("column_family_2_k2"));
+        db.flush(new FlushOptions().setWaitForFlush(true), columnFamilyHandles.get(2));
+        // The blob-disabled column family must not add another blob file.
+        assertThat(countDBFiles(".blob")).isEqualTo(1);
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
new file mode 100644
index 000000000..330881764
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java
@@ -0,0 +1,490 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
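+//
+// A minimal usage sketch for the table format configured by this test
+// (illustrative only; the path and sizes are placeholders, and UTF_8 stands
+// for java.nio.charset.StandardCharsets.UTF_8):
+//
+//   try (final Options options = new Options()
+//            .setCreateIfMissing(true)
+//            .setTableFormatConfig(new BlockBasedTableConfig()
+//                .setBlockSize(16 * 1024)
+//                .setCacheIndexAndFilterBlocks(true));
+//        final RocksDB db = RocksDB.open(options, "/tmp/block-based-example")) {
+//     db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+//   }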
+ +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.fail; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.stream.Stream; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class BlockBasedTableConfigTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void cacheIndexAndFilterBlocks() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setCacheIndexAndFilterBlocks(true); + assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()). + isTrue(); + } + + @Test + public void cacheIndexAndFilterBlocksWithHighPriority() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocksWithHighPriority()). + isTrue(); + blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(false); + assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocksWithHighPriority()).isFalse(); + } + + @Test + public void pinL0FilterAndIndexBlocksInCache() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true); + assertThat(blockBasedTableConfig.pinL0FilterAndIndexBlocksInCache()). + isTrue(); + } + + @Test + public void pinTopLevelIndexAndFilter() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setPinTopLevelIndexAndFilter(false); + assertThat(blockBasedTableConfig.pinTopLevelIndexAndFilter()). + isFalse(); + } + + @Test + public void indexType() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + assertThat(IndexType.values().length).isEqualTo(4); + blockBasedTableConfig.setIndexType(IndexType.kHashSearch); + assertThat(blockBasedTableConfig.indexType()).isEqualTo(IndexType.kHashSearch); + assertThat(IndexType.valueOf("kBinarySearch")).isNotNull(); + blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch")); + assertThat(blockBasedTableConfig.indexType()).isEqualTo(IndexType.kBinarySearch); + } + + @Test + public void dataBlockIndexType() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash); + assertThat(blockBasedTableConfig.dataBlockIndexType()) + .isEqualTo(DataBlockIndexType.kDataBlockBinaryAndHash); + blockBasedTableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch); + assertThat(blockBasedTableConfig.dataBlockIndexType()) + .isEqualTo(DataBlockIndexType.kDataBlockBinarySearch); + } + + @Test + public void checksumType() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + assertThat(ChecksumType.values().length).isEqualTo(5); + assertThat(ChecksumType.valueOf("kxxHash")). 
+ isEqualTo(ChecksumType.kxxHash); + blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum); + assertThat(blockBasedTableConfig.checksumType()).isEqualTo(ChecksumType.kNoChecksum); + blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash); + assertThat(blockBasedTableConfig.checksumType()).isEqualTo(ChecksumType.kxxHash); + blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash64); + assertThat(blockBasedTableConfig.checksumType()).isEqualTo(ChecksumType.kxxHash64); + blockBasedTableConfig.setChecksumType(ChecksumType.kXXH3); + assertThat(blockBasedTableConfig.checksumType()).isEqualTo(ChecksumType.kXXH3); + } + + @Test + public void jniPortal() throws Exception { + // Verifies that the JNI layer is correctly translating options. + // Since introspecting the options requires creating a database, the checks + // cover multiple options at the same time. + + final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig(); + + tableConfig.setIndexType(IndexType.kBinarySearch); + tableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinarySearch); + tableConfig.setChecksumType(ChecksumType.kNoChecksum); + try (final Options options = new Options().setTableFormatConfig(tableConfig)) { + String opts = getOptionAsString(options); + assertThat(opts).contains("index_type=kBinarySearch"); + assertThat(opts).contains("data_block_index_type=kDataBlockBinarySearch"); + assertThat(opts).contains("checksum=kNoChecksum"); + } + + tableConfig.setIndexType(IndexType.kHashSearch); + tableConfig.setDataBlockIndexType(DataBlockIndexType.kDataBlockBinaryAndHash); + tableConfig.setChecksumType(ChecksumType.kCRC32c); + try (final Options options = new Options().setTableFormatConfig(tableConfig)) { + options.useCappedPrefixExtractor(1); // Needed to use kHashSearch + String opts = getOptionAsString(options); + assertThat(opts).contains("index_type=kHashSearch"); + assertThat(opts).contains("data_block_index_type=kDataBlockBinaryAndHash"); + assertThat(opts).contains("checksum=kCRC32c"); + } + + tableConfig.setIndexType(IndexType.kTwoLevelIndexSearch); + tableConfig.setChecksumType(ChecksumType.kxxHash); + try (final Options options = new Options().setTableFormatConfig(tableConfig)) { + String opts = getOptionAsString(options); + assertThat(opts).contains("index_type=kTwoLevelIndexSearch"); + assertThat(opts).contains("checksum=kxxHash"); + } + + tableConfig.setIndexType(IndexType.kBinarySearchWithFirstKey); + tableConfig.setChecksumType(ChecksumType.kxxHash64); + try (final Options options = new Options().setTableFormatConfig(tableConfig)) { + String opts = getOptionAsString(options); + assertThat(opts).contains("index_type=kBinarySearchWithFirstKey"); + assertThat(opts).contains("checksum=kxxHash64"); + } + + tableConfig.setChecksumType(ChecksumType.kXXH3); + try (final Options options = new Options().setTableFormatConfig(tableConfig)) { + String opts = getOptionAsString(options); + assertThat(opts).contains("checksum=kXXH3"); + } + } + + private String getOptionAsString(Options options) throws Exception { + options.setCreateIfMissing(true); + String dbPath = dbFolder.getRoot().getAbsolutePath(); + String result; + try (final RocksDB db = RocksDB.open(options, dbPath); + final Stream pathStream = Files.walk(Paths.get(dbPath))) { + Path optionsPath = + pathStream + .filter(p -> p.getFileName().toString().startsWith("OPTIONS")) + .findAny() + .orElseThrow(() -> new AssertionError("Missing options file")); + byte[] optionsData = Files.readAllBytes(optionsPath); + result = new 
String(optionsData, StandardCharsets.UTF_8); + } + RocksDB.destroyDB(dbPath, options); + return result; + } + + @Test + public void noBlockCache() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setNoBlockCache(true); + assertThat(blockBasedTableConfig.noBlockCache()).isTrue(); + } + + @Test + public void blockCache() { + try ( + final Cache cache = new LRUCache(17 * 1024 * 1024); + final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setBlockCache(cache))) { + assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable"); + } + } + + @Test + public void blockCacheIntegration() throws RocksDBException { + try (final Cache cache = new LRUCache(8 * 1024 * 1024); + final Statistics statistics = new Statistics()) { + for (int shard = 0; shard < 8; shard++) { + try (final Options options = + new Options() + .setCreateIfMissing(true) + .setStatistics(statistics) + .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache)); + final RocksDB db = + RocksDB.open(options, dbFolder.getRoot().getAbsolutePath() + "/" + shard)) { + final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + db.put(key, value); + db.flush(new FlushOptions()); + db.get(key); + + assertThat(statistics.getTickerCount(TickerType.BLOCK_CACHE_ADD)).isEqualTo(shard + 1); + } + } + } + } + + @Test + public void persistentCache() throws RocksDBException { + try (final DBOptions dbOptions = new DBOptions(). + setInfoLogLevel(InfoLogLevel.INFO_LEVEL). + setCreateIfMissing(true); + final Logger logger = new Logger(dbOptions) { + @Override + protected void log(final InfoLogLevel infoLogLevel, final String logMsg) { + System.out.println(infoLogLevel.name() + ": " + logMsg); + } + }) { + try (final PersistentCache persistentCache = + new PersistentCache(Env.getDefault(), dbFolder.getRoot().getPath(), 1024 * 1024 * 100, logger, false); + final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setPersistentCache(persistentCache))) { + assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable"); + } + } + } + + @Test + public void blockCacheCompressed() { + try (final Cache cache = new LRUCache(17 * 1024 * 1024); + final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setBlockCacheCompressed(cache))) { + assertThat(options.tableFactoryName()).isEqualTo("BlockBasedTable"); + } + } + + @Ignore("See issue: https://github.com/facebook/rocksdb/issues/4822") + @Test + public void blockCacheCompressedIntegration() throws RocksDBException { + final byte[] key1 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] key2 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] key3 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] key4 = "some-key1".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + try (final Cache compressedCache = new LRUCache(8 * 1024 * 1024); + final Statistics statistics = new Statistics()) { + + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig() + .setNoBlockCache(true) + .setBlockCache(null) + .setBlockCacheCompressed(compressedCache) + .setFormatVersion(4); + + try (final Options options = new Options() + .setCreateIfMissing(true) + .setStatistics(statistics) + .setTableFormatConfig(blockBasedTableConfig)) { + + for (int shard = 0; shard < 8; 
shard++) { + try (final FlushOptions flushOptions = new FlushOptions(); + final WriteOptions writeOptions = new WriteOptions(); + final ReadOptions readOptions = new ReadOptions(); + final RocksDB db = + RocksDB.open(options, dbFolder.getRoot().getAbsolutePath() + "/" + shard)) { + + db.put(writeOptions, key1, value); + db.put(writeOptions, key2, value); + db.put(writeOptions, key3, value); + db.put(writeOptions, key4, value); + db.flush(flushOptions); + + db.get(readOptions, key1); + db.get(readOptions, key2); + db.get(readOptions, key3); + db.get(readOptions, key4); + + assertThat(statistics.getTickerCount(TickerType.BLOCK_CACHE_COMPRESSED_ADD)).isEqualTo(shard + 1); + } + } + } + } + } + + @Test + public void blockSize() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockSize(10); + assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10); + } + + @Test + public void blockSizeDeviation() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockSizeDeviation(12); + assertThat(blockBasedTableConfig.blockSizeDeviation()). + isEqualTo(12); + } + + @Test + public void blockRestartInterval() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockRestartInterval(15); + assertThat(blockBasedTableConfig.blockRestartInterval()). + isEqualTo(15); + } + + @Test + public void indexBlockRestartInterval() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setIndexBlockRestartInterval(15); + assertThat(blockBasedTableConfig.indexBlockRestartInterval()). + isEqualTo(15); + } + + @Test + public void metadataBlockSize() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setMetadataBlockSize(1024); + assertThat(blockBasedTableConfig.metadataBlockSize()). + isEqualTo(1024); + } + + @Test + public void partitionFilters() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setPartitionFilters(true); + assertThat(blockBasedTableConfig.partitionFilters()). + isTrue(); + } + + @Test + public void optimizeFiltersForMemory() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setOptimizeFiltersForMemory(true); + assertThat(blockBasedTableConfig.optimizeFiltersForMemory()).isTrue(); + } + + @Test + public void useDeltaEncoding() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setUseDeltaEncoding(false); + assertThat(blockBasedTableConfig.useDeltaEncoding()). + isFalse(); + } + + @Test + public void blockBasedTableWithFilterPolicy() { + try(final Options options = new Options() + .setTableFormatConfig(new BlockBasedTableConfig() + .setFilterPolicy(new BloomFilter(10)))) { + assertThat(options.tableFactoryName()). + isEqualTo("BlockBasedTable"); + } + } + + @Test + public void blockBasedTableWithoutFilterPolicy() { + try(final Options options = new Options().setTableFormatConfig( + new BlockBasedTableConfig().setFilterPolicy(null))) { + assertThat(options.tableFactoryName()). 
+ isEqualTo("BlockBasedTable"); + } + } + + @Test + public void wholeKeyFiltering() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setWholeKeyFiltering(false); + assertThat(blockBasedTableConfig.wholeKeyFiltering()). + isFalse(); + } + + @Test + public void verifyCompression() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + assertThat(blockBasedTableConfig.verifyCompression()).isFalse(); + blockBasedTableConfig.setVerifyCompression(true); + assertThat(blockBasedTableConfig.verifyCompression()). + isTrue(); + } + + @Test + public void readAmpBytesPerBit() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setReadAmpBytesPerBit(2); + assertThat(blockBasedTableConfig.readAmpBytesPerBit()). + isEqualTo(2); + } + + @Test + public void formatVersion() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + for (int version = 0; version <= 5; version++) { + blockBasedTableConfig.setFormatVersion(version); + assertThat(blockBasedTableConfig.formatVersion()).isEqualTo(version); + } + } + + @Test(expected = AssertionError.class) + public void formatVersionFailNegative() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setFormatVersion(-1); + } + + @Test(expected = RocksDBException.class) + public void invalidFormatVersion() throws RocksDBException { + final BlockBasedTableConfig blockBasedTableConfig = + new BlockBasedTableConfig().setFormatVersion(99999); + + try (final Options options = new Options().setTableFormatConfig(blockBasedTableConfig); + final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + fail("Opening the database with an invalid format_version should have raised an exception"); + } + } + + @Test + public void enableIndexCompression() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setEnableIndexCompression(false); + assertThat(blockBasedTableConfig.enableIndexCompression()). + isFalse(); + } + + @Test + public void blockAlign() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockAlign(true); + assertThat(blockBasedTableConfig.blockAlign()). + isTrue(); + } + + @Test + public void indexShortening() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setIndexShortening(IndexShorteningMode.kShortenSeparatorsAndSuccessor); + assertThat(blockBasedTableConfig.indexShortening()) + .isEqualTo(IndexShorteningMode.kShortenSeparatorsAndSuccessor); + } + + @Deprecated + @Test + public void hashIndexAllowCollision() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setHashIndexAllowCollision(false); + assertThat(blockBasedTableConfig.hashIndexAllowCollision()). + isTrue(); // NOTE: setHashIndexAllowCollision should do nothing! + } + + @Deprecated + @Test + public void blockCacheSize() { + final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockCacheSize(8 * 1024); + assertThat(blockBasedTableConfig.blockCacheSize()). 
+        isEqualTo(8 * 1024);
+  }
+
+  @Deprecated
+  @Test
+  public void blockCacheNumShardBits() {
+    final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setCacheNumShardBits(5);
+    assertThat(blockBasedTableConfig.cacheNumShardBits()).
+        isEqualTo(5);
+  }
+
+  @Deprecated
+  @Test
+  public void blockCacheCompressedSize() {
+    final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setBlockCacheCompressedSize(40);
+    assertThat(blockBasedTableConfig.blockCacheCompressedSize()).
+        isEqualTo(40);
+  }
+
+  @Deprecated
+  @Test
+  public void blockCacheCompressedNumShardBits() {
+    final BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4);
+    assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()).
+        isEqualTo(4);
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java
new file mode 100644
index 000000000..e238ae07b
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/BuiltinComparatorTest.java
@@ -0,0 +1,145 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class BuiltinComparatorTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void builtinForwardComparator()
+      throws RocksDBException {
+    try (final Options options = new Options()
+        .setCreateIfMissing(true)
+        .setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
+         final RocksDB rocksDb = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())
+    ) {
+      rocksDb.put("abc1".getBytes(), "abc1".getBytes());
+      rocksDb.put("abc2".getBytes(), "abc2".getBytes());
+      rocksDb.put("abc3".getBytes(), "abc3".getBytes());
+
+      try (final RocksIterator rocksIterator = rocksDb.newIterator()) {
+        // Iterate over keys using an iterator
+        rocksIterator.seekToFirst();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc1".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc1".getBytes());
+        rocksIterator.next();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc2".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc2".getBytes());
+        rocksIterator.next();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc3".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc3".getBytes());
+        rocksIterator.next();
+        assertThat(rocksIterator.isValid()).isFalse();
+        // Get last one
+        rocksIterator.seekToLast();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc3".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc3".getBytes());
+        // Seek for abc
+        rocksIterator.seek("abc".getBytes());
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc1".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc1".getBytes());
+      }
+    }
+  }
+
+  @Test
+  public void builtinReverseComparator()
+      throws RocksDBException {
+    try (final Options options = new Options()
+        .setCreateIfMissing(true)
+        .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR);
+         final RocksDB rocksDb = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())
+    ) {
+
+      rocksDb.put("abc1".getBytes(), "abc1".getBytes());
+      rocksDb.put("abc2".getBytes(), "abc2".getBytes());
+      rocksDb.put("abc3".getBytes(), "abc3".getBytes());
+
+      try (final RocksIterator rocksIterator = rocksDb.newIterator()) {
+        // Iterate over keys using an iterator
+        rocksIterator.seekToFirst();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc3".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc3".getBytes());
+        rocksIterator.next();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc2".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc2".getBytes());
+        rocksIterator.next();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc1".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc1".getBytes());
+        rocksIterator.next();
+        assertThat(rocksIterator.isValid()).isFalse();
+        // Get last one
+        rocksIterator.seekToLast();
+        assertThat(rocksIterator.isValid()).isTrue();
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc1".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc1".getBytes());
+        // Will be invalid because abc is after abc1
+        rocksIterator.seek("abc".getBytes());
+        assertThat(rocksIterator.isValid()).isFalse();
+        // Will be abc3 because the next one after abc999
+        // is abc3
+        rocksIterator.seek("abc999".getBytes());
+        assertThat(rocksIterator.key()).isEqualTo(
+            "abc3".getBytes());
+        assertThat(rocksIterator.value()).isEqualTo(
+            "abc3".getBytes());
+      }
+    }
+  }
+
+  @Test
+  public void builtinComparatorEnum() {
+    assertThat(BuiltinComparator.BYTEWISE_COMPARATOR.ordinal())
+        .isEqualTo(0);
+    assertThat(
+        BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR.ordinal())
+        .isEqualTo(1);
+    assertThat(BuiltinComparator.values().length).isEqualTo(2);
+    assertThat(BuiltinComparator.valueOf("BYTEWISE_COMPARATOR")).
+        isEqualTo(BuiltinComparator.BYTEWISE_COMPARATOR);
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java
new file mode 100644
index 000000000..fe950362b
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/BytewiseComparatorRegressionTest.java
@@ -0,0 +1,126 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
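+//
+// Background for the test data below: Java bytes are signed, but RocksDB's
+// bytewise comparators order keys as unsigned bytes. The byte -11 reads as
+// 0xF5 unsigned, which is why {10, -11, 13} is expected to sort after
+// {10, 11, 14} in orderedData.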
+ +package org.rocksdb; + +import static org.junit.Assert.assertArrayEquals; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.BytewiseComparator; + +/** + * This test confirms that the following issues were in fact resolved + * by a change made between 6.2.2 and 6.22.1, + * to wit {@link ...} + * which as part of its effect, changed the Java bytewise comparators. + * + * {@link ...} + * {@link ...} + */ +public class BytewiseComparatorRegressionTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Rule public TemporaryFolder temporarySSTFolder = new TemporaryFolder(); + + private final static byte[][] testData = {{10, -11, 13}, {10, 11, 12}, {10, 11, 14}}; + private final static byte[][] orderedData = {{10, 11, 12}, {10, 11, 14}, {10, -11, 13}}; + + /** + * {@link ...} + */ + @Test + public void testJavaComparator() throws RocksDBException { + final BytewiseComparator comparator = new BytewiseComparator(new ComparatorOptions()); + performTest(new Options().setCreateIfMissing(true).setComparator(comparator)); + } + + @Test + public void testDefaultComparator() throws RocksDBException { + performTest(new Options().setCreateIfMissing(true)); + } + + /** + * {@link ...} + */ + @Test + public void testCppComparator() throws RocksDBException { + performTest(new Options().setCreateIfMissing(true).setComparator( + BuiltinComparator.BYTEWISE_COMPARATOR)); + } + + private void performTest(final Options options) throws RocksDBException { + try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + for (final byte[] item : testData) { + db.put(item, item); + } + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + final ArrayList result = new ArrayList<>(); + while (iterator.isValid()) { + result.add(iterator.key()); + iterator.next(); + } + assertArrayEquals(orderedData, result.toArray()); + } + } + } + + private byte[] hexToByte(final String hexString) { + final byte[] bytes = new byte[hexString.length() / 2]; + if (bytes.length * 2 < hexString.length()) { + throw new RuntimeException("Hex string has odd length: " + hexString); + } + + for (int i = 0; i < bytes.length; i++) { + final int firstDigit = toDigit(hexString.charAt(i + i)); + final int secondDigit = toDigit(hexString.charAt(i + i + 1)); + bytes[i] = (byte) ((firstDigit << 4) + secondDigit); + } + + return bytes; + } + + private int toDigit(final char hexChar) { + final int digit = Character.digit(hexChar, 16); + if (digit == -1) { + throw new IllegalArgumentException("Invalid Hexadecimal Character: " + hexChar); + } + return digit; + } + + /** + * {@link ...} + * + * @throws RocksDBException if something goes wrong, or if the regression occurs + * @throws IOException if we can't make the temporary file + */ + @Test + public void testSST() throws RocksDBException, IOException { + final File tempSSTFile = temporarySSTFolder.newFile("test_file_with_weird_keys.sst"); + + final EnvOptions envOpts = new EnvOptions(); + final Options opts = new Options(); + opts.setComparator(new BytewiseComparator(new ComparatorOptions())); + final SstFileWriter writer = new SstFileWriter(envOpts, opts); + writer.open(tempSSTFile.getAbsolutePath()); + final byte[] gKey = + 
hexToByte("000000293030303030303030303030303030303030303032303736343730696E666F33"); + final byte[] wKey = + hexToByte("0000008d3030303030303030303030303030303030303030303437363433696e666f34"); + writer.put(new Slice(gKey), new Slice("dummyV1")); + writer.put(new Slice(wKey), new Slice("dummyV2")); + writer.finish(); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java new file mode 100644 index 000000000..c2cc6fc62 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java @@ -0,0 +1,83 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CheckPointTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Rule + public TemporaryFolder checkpointFolder = new TemporaryFolder(); + + @Test + public void checkPoint() throws RocksDBException { + try (final Options options = new Options(). + setCreateIfMissing(true)) { + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + try (final Checkpoint checkpoint = Checkpoint.create(db)) { + checkpoint.createCheckpoint(checkpointFolder. + getRoot().getAbsolutePath() + "/snapshot1"); + db.put("key2".getBytes(), "value2".getBytes()); + checkpoint.createCheckpoint(checkpointFolder. + getRoot().getAbsolutePath() + "/snapshot2"); + } + } + + try (final RocksDB db = RocksDB.open(options, + checkpointFolder.getRoot().getAbsolutePath() + + "/snapshot1")) { + assertThat(new String(db.get("key".getBytes()))). + isEqualTo("value"); + assertThat(db.get("key2".getBytes())).isNull(); + } + + try (final RocksDB db = RocksDB.open(options, + checkpointFolder.getRoot().getAbsolutePath() + + "/snapshot2")) { + assertThat(new String(db.get("key".getBytes()))). + isEqualTo("value"); + assertThat(new String(db.get("key2".getBytes()))). + isEqualTo("value2"); + } + } + } + + @Test(expected = IllegalArgumentException.class) + public void failIfDbIsNull() { + try (final Checkpoint checkpoint = Checkpoint.create(null)) { + + } + } + + @Test(expected = IllegalStateException.class) + public void failIfDbNotInitialized() throws RocksDBException { + try (final RocksDB db = RocksDB.open( + dbFolder.getRoot().getAbsolutePath())) { + db.close(); + Checkpoint.create(db); + } + } + + @Test(expected = RocksDBException.class) + public void failWithIllegalPath() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final Checkpoint checkpoint = Checkpoint.create(db)) { + checkpoint.createCheckpoint("/Z:///:\\C:\\TZ/-"); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java new file mode 100644 index 000000000..d1241ac75 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/ClockCacheTest.java @@ -0,0 +1,26 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +public class ClockCacheTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void newClockCache() { + final long capacity = 1000; + final int numShardBits = 16; + final boolean strictCapacityLimit = true; + try(final Cache clockCache = new ClockCache(capacity, + numShardBits, strictCapacityLimit)) { + //no op + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java new file mode 100644 index 000000000..7d7581048 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java @@ -0,0 +1,714 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.*; +import org.junit.ClassRule; +import org.junit.Test; +import org.rocksdb.test.RemoveEmptyValueCompactionFilterFactory; + +public class ColumnFamilyOptionsTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + public static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void copyConstructor() { + ColumnFamilyOptions origOpts = new ColumnFamilyOptions(); + origOpts.setNumLevels(rand.nextInt(8)); + origOpts.setTargetFileSizeMultiplier(rand.nextInt(100)); + origOpts.setLevel0StopWritesTrigger(rand.nextInt(50)); + ColumnFamilyOptions copyOpts = new ColumnFamilyOptions(origOpts); + assertThat(origOpts.numLevels()).isEqualTo(copyOpts.numLevels()); + assertThat(origOpts.targetFileSizeMultiplier()).isEqualTo(copyOpts.targetFileSizeMultiplier()); + assertThat(origOpts.level0StopWritesTrigger()).isEqualTo(copyOpts.level0StopWritesTrigger()); + } + + @Test + public void getColumnFamilyOptionsFromProps() { + Properties properties = new Properties(); + properties.put("write_buffer_size", "112"); + properties.put("max_write_buffer_number", "13"); + + try (final ColumnFamilyOptions opt = ColumnFamilyOptions. + getColumnFamilyOptionsFromProps(properties)) { + // setup sample properties + assertThat(opt).isNotNull(); + assertThat(String.valueOf(opt.writeBufferSize())). + isEqualTo(properties.get("write_buffer_size")); + assertThat(String.valueOf(opt.maxWriteBufferNumber())). 
+ isEqualTo(properties.get("max_write_buffer_number")); + } + } + + @Test + public void getColumnFamilyOptionsFromPropsWithIgnoreIllegalValue() { + // setup sample properties + final Properties properties = new Properties(); + properties.put("tomato", "1024"); + properties.put("burger", "2"); + properties.put("write_buffer_size", "112"); + properties.put("max_write_buffer_number", "13"); + + try (final ConfigOptions cfgOpts = new ConfigOptions().setIgnoreUnknownOptions(true); + final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps(cfgOpts, properties)) { + // setup sample properties + assertThat(opt).isNotNull(); + assertThat(String.valueOf(opt.writeBufferSize())) + .isEqualTo(properties.get("write_buffer_size")); + assertThat(String.valueOf(opt.maxWriteBufferNumber())) + .isEqualTo(properties.get("max_write_buffer_number")); + } + } + + @Test + public void failColumnFamilyOptionsFromPropsWithIllegalValue() { + // setup sample properties + final Properties properties = new Properties(); + properties.put("tomato", "1024"); + properties.put("burger", "2"); + + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps(properties)) { + assertThat(opt).isNull(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void failColumnFamilyOptionsFromPropsWithNullValue() { + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) { + } + } + + @Test(expected = IllegalArgumentException.class) + public void failColumnFamilyOptionsFromPropsWithEmptyProps() { + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps( + new Properties())) { + } + } + + @Test + public void writeBufferSize() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setWriteBufferSize(longValue); + assertThat(opt.writeBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void maxWriteBufferNumber() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxWriteBufferNumber(intValue); + assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue); + } + } + + @Test + public void minWriteBufferNumberToMerge() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setMinWriteBufferNumberToMerge(intValue); + assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue); + } + } + + @Test + public void numLevels() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setNumLevels(intValue); + assertThat(opt.numLevels()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroFileNumCompactionTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroFileNumCompactionTrigger(intValue); + assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroSlowdownWritesTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroSlowdownWritesTrigger(intValue); + assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroStopWritesTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); 
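+      // rand.nextInt() can be negative; this test, like the neighbouring
+      // getter/setter tests, only checks that the value round-trips through
+      // JNI unchanged, not that it is a semantically sensible trigger value.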
+ opt.setLevelZeroStopWritesTrigger(intValue); + assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void targetFileSizeBase() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setTargetFileSizeBase(longValue); + assertThat(opt.targetFileSizeBase()).isEqualTo(longValue); + } + } + + @Test + public void targetFileSizeMultiplier() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setTargetFileSizeMultiplier(intValue); + assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue); + } + } + + @Test + public void maxBytesForLevelBase() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxBytesForLevelBase(longValue); + assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue); + } + } + + @Test + public void levelCompactionDynamicLevelBytes() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setLevelCompactionDynamicLevelBytes(boolValue); + assertThat(opt.levelCompactionDynamicLevelBytes()) + .isEqualTo(boolValue); + } + } + + @Test + public void maxBytesForLevelMultiplier() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); + opt.setMaxBytesForLevelMultiplier(doubleValue); + assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(doubleValue); + } + } + + @Test + public void maxBytesForLevelMultiplierAdditional() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue1 = rand.nextInt(); + final int intValue2 = rand.nextInt(); + final int[] ints = new int[]{intValue1, intValue2}; + opt.setMaxBytesForLevelMultiplierAdditional(ints); + assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints); + } + } + + @Test + public void maxCompactionBytes() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxCompactionBytes(longValue); + assertThat(opt.maxCompactionBytes()).isEqualTo(longValue); + } + } + + @Test + public void softPendingCompactionBytesLimit() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setSoftPendingCompactionBytesLimit(longValue); + assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue); + } + } + + @Test + public void hardPendingCompactionBytesLimit() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setHardPendingCompactionBytesLimit(longValue); + assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue); + } + } + + @Test + public void level0FileNumCompactionTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setLevel0FileNumCompactionTrigger(intValue); + assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue); + } + } + + @Test + public void level0SlowdownWritesTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setLevel0SlowdownWritesTrigger(intValue); + assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void level0StopWritesTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int 
intValue = rand.nextInt(); + opt.setLevel0StopWritesTrigger(intValue); + assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void arenaBlockSize() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setArenaBlockSize(longValue); + assertThat(opt.arenaBlockSize()).isEqualTo(longValue); + } + } + + @Test + public void disableAutoCompactions() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDisableAutoCompactions(boolValue); + assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue); + } + } + + @Test + public void maxSequentialSkipInIterations() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxSequentialSkipInIterations(longValue); + assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue); + } + } + + @Test + public void inplaceUpdateSupport() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setInplaceUpdateSupport(boolValue); + assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue); + } + } + + @Test + public void inplaceUpdateNumLocks() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setInplaceUpdateNumLocks(longValue); + assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue); + } + } + + @Test + public void memtablePrefixBloomSizeRatio() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); + opt.setMemtablePrefixBloomSizeRatio(doubleValue); + assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue); + } + } + + @Test + public void experimentalMempurgeThreshold() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); + opt.setExperimentalMempurgeThreshold(doubleValue); + assertThat(opt.experimentalMempurgeThreshold()).isEqualTo(doubleValue); + } + } + + @Test + public void memtableWholeKeyFiltering() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean booleanValue = rand.nextBoolean(); + opt.setMemtableWholeKeyFiltering(booleanValue); + assertThat(opt.memtableWholeKeyFiltering()).isEqualTo(booleanValue); + } + } + + @Test + public void memtableHugePageSize() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setMemtableHugePageSize(longValue); + assertThat(opt.memtableHugePageSize()).isEqualTo(longValue); + } + } + + @Test + public void bloomLocality() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setBloomLocality(intValue); + assertThat(opt.bloomLocality()).isEqualTo(intValue); + } + } + + @Test + public void maxSuccessiveMerges() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxSuccessiveMerges(longValue); + assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue); + } + } + + @Test + public void optimizeFiltersForHits() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean aBoolean = rand.nextBoolean(); + opt.setOptimizeFiltersForHits(aBoolean); + 
assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean);
+    }
+  }
+
+  @Test
+  public void memTable() throws RocksDBException {
+    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+      opt.setMemTableConfig(new HashLinkedListMemTableConfig());
+      assertThat(opt.memTableFactoryName()).
+          isEqualTo("HashLinkedListRepFactory");
+    }
+  }
+
+  @Test
+  public void comparator() throws RocksDBException {
+    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+      opt.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
+    }
+  }
+
+  @Test
+  public void linkageOfPrepMethods() {
+    try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+      options.optimizeUniversalStyleCompaction();
+      options.optimizeUniversalStyleCompaction(4000);
+      options.optimizeLevelStyleCompaction();
+      options.optimizeLevelStyleCompaction(3000);
+      options.optimizeForPointLookup(10);
+      options.optimizeForSmallDb();
+    }
+  }
+
+  @Test
+  public void shouldSetTestPrefixExtractor() {
+    try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+      options.useFixedLengthPrefixExtractor(100);
+      options.useFixedLengthPrefixExtractor(10);
+    }
+  }
+
+  @Test
+  public void shouldSetTestCappedPrefixExtractor() {
+    try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+      options.useCappedPrefixExtractor(100);
+      options.useCappedPrefixExtractor(10);
+    }
+  }
+
+  @Test
+  public void compressionTypes() {
+    try (final ColumnFamilyOptions columnFamilyOptions
+             = new ColumnFamilyOptions()) {
+      for (final CompressionType compressionType :
+          CompressionType.values()) {
+        columnFamilyOptions.setCompressionType(compressionType);
+        assertThat(columnFamilyOptions.compressionType()).
+            isEqualTo(compressionType);
+        assertThat(CompressionType.valueOf("NO_COMPRESSION")).
+            isEqualTo(CompressionType.NO_COMPRESSION);
+      }
+    }
+  }
+
+  @Test
+  public void compressionPerLevel() {
+    try (final ColumnFamilyOptions columnFamilyOptions
+             = new ColumnFamilyOptions()) {
+      assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
+      List<CompressionType> compressionTypeList = new ArrayList<>();
+      for (int i = 0; i < columnFamilyOptions.numLevels(); i++) {
+        compressionTypeList.add(CompressionType.NO_COMPRESSION);
+      }
+      columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
+      compressionTypeList = columnFamilyOptions.compressionPerLevel();
+      for (CompressionType compressionType : compressionTypeList) {
+        assertThat(compressionType).isEqualTo(
+            CompressionType.NO_COMPRESSION);
+      }
+    }
+  }
+
+  @Test
+  public void differentCompressionsPerLevel() {
+    try (final ColumnFamilyOptions columnFamilyOptions
+             = new ColumnFamilyOptions()) {
+      columnFamilyOptions.setNumLevels(3);
+
+      assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty();
+      List<CompressionType> compressionTypeList = new ArrayList<>();
+
+      compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
+      compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
+      compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
+
+      columnFamilyOptions.setCompressionPerLevel(compressionTypeList);
+      compressionTypeList = columnFamilyOptions.compressionPerLevel();
+
+      assertThat(compressionTypeList.size()).isEqualTo(3);
+      assertThat(compressionTypeList).
+ containsExactly( + CompressionType.BZLIB2_COMPRESSION, + CompressionType.SNAPPY_COMPRESSION, + CompressionType.LZ4_COMPRESSION); + + } + } + + @Test + public void bottommostCompressionType() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + assertThat(columnFamilyOptions.bottommostCompressionType()) + .isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION); + + for (final CompressionType compressionType : CompressionType.values()) { + columnFamilyOptions.setBottommostCompressionType(compressionType); + assertThat(columnFamilyOptions.bottommostCompressionType()) + .isEqualTo(compressionType); + } + } + } + + @Test + public void bottommostCompressionOptions() { + try (final ColumnFamilyOptions columnFamilyOptions = + new ColumnFamilyOptions(); + final CompressionOptions bottommostCompressionOptions = + new CompressionOptions() + .setMaxDictBytes(123)) { + + columnFamilyOptions.setBottommostCompressionOptions( + bottommostCompressionOptions); + assertThat(columnFamilyOptions.bottommostCompressionOptions()) + .isEqualTo(bottommostCompressionOptions); + assertThat(columnFamilyOptions.bottommostCompressionOptions() + .maxDictBytes()).isEqualTo(123); + } + } + + @Test + public void compressionOptions() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions(); + final CompressionOptions compressionOptions = new CompressionOptions() + .setMaxDictBytes(123)) { + + columnFamilyOptions.setCompressionOptions(compressionOptions); + assertThat(columnFamilyOptions.compressionOptions()) + .isEqualTo(compressionOptions); + assertThat(columnFamilyOptions.compressionOptions().maxDictBytes()) + .isEqualTo(123); + } + } + + @Test + public void compactionStyles() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + for (final CompactionStyle compactionStyle : + CompactionStyle.values()) { + columnFamilyOptions.setCompactionStyle(compactionStyle); + assertThat(columnFamilyOptions.compactionStyle()). + isEqualTo(compactionStyle); + assertThat(CompactionStyle.valueOf("FIFO")). + isEqualTo(CompactionStyle.FIFO); + } + } + } + + @Test + public void maxTableFilesSizeFIFO() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + long longValue = rand.nextLong(); + // Size has to be positive + longValue = (longValue < 0) ? -longValue : longValue; + longValue = (longValue == 0) ? longValue + 1 : longValue; + opt.setMaxTableFilesSizeFIFO(longValue); + assertThat(opt.maxTableFilesSizeFIFO()). + isEqualTo(longValue); + } + } + + @Test + public void maxWriteBufferNumberToMaintain() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + int intValue = rand.nextInt(); + // Size has to be positive + intValue = (intValue < 0) ? -intValue : intValue; + intValue = (intValue == 0) ? intValue + 1 : intValue; + opt.setMaxWriteBufferNumberToMaintain(intValue); + assertThat(opt.maxWriteBufferNumberToMaintain()). + isEqualTo(intValue); + } + } + + @Test + public void compactionPriorities() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + for (final CompactionPriority compactionPriority : + CompactionPriority.values()) { + opt.setCompactionPriority(compactionPriority); + assertThat(opt.compactionPriority()). + isEqualTo(compactionPriority); + } + } + } + + @Test + public void reportBgIoStats() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean booleanValue = true; + opt.setReportBgIoStats(booleanValue); + assertThat(opt.reportBgIoStats()). 
+ isEqualTo(booleanValue); + } + } + + @Test + public void ttl() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { + options.setTtl(1000 * 60); + assertThat(options.ttl()). + isEqualTo(1000 * 60); + } + } + + @Test + public void periodicCompactionSeconds() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { + options.setPeriodicCompactionSeconds(1000 * 60); + assertThat(options.periodicCompactionSeconds()).isEqualTo(1000 * 60); + } + } + + @Test + public void compactionOptionsUniversal() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions(); + final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal() + .setCompressionSizePercent(7)) { + opt.setCompactionOptionsUniversal(optUni); + assertThat(opt.compactionOptionsUniversal()). + isEqualTo(optUni); + assertThat(opt.compactionOptionsUniversal().compressionSizePercent()) + .isEqualTo(7); + } + } + + @Test + public void compactionOptionsFIFO() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions(); + final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO() + .setMaxTableFilesSize(2000)) { + opt.setCompactionOptionsFIFO(optFifo); + assertThat(opt.compactionOptionsFIFO()). + isEqualTo(optFifo); + assertThat(opt.compactionOptionsFIFO().maxTableFilesSize()) + .isEqualTo(2000); + } + } + + @Test + public void forceConsistencyChecks() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean booleanValue = true; + opt.setForceConsistencyChecks(booleanValue); + assertThat(opt.forceConsistencyChecks()). + isEqualTo(booleanValue); + } + } + + @Test + public void compactionFilter() { + try(final ColumnFamilyOptions options = new ColumnFamilyOptions(); + final RemoveEmptyValueCompactionFilter cf = new RemoveEmptyValueCompactionFilter()) { + options.setCompactionFilter(cf); + assertThat(options.compactionFilter()).isEqualTo(cf); + } + } + + @Test + public void compactionFilterFactory() { + try(final ColumnFamilyOptions options = new ColumnFamilyOptions(); + final RemoveEmptyValueCompactionFilterFactory cff = new RemoveEmptyValueCompactionFilterFactory()) { + options.setCompactionFilterFactory(cff); + assertThat(options.compactionFilterFactory()).isEqualTo(cff); + } + } + + @Test + public void compactionThreadLimiter() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions(); + final ConcurrentTaskLimiter compactionThreadLimiter = + new ConcurrentTaskLimiterImpl("name", 3)) { + options.setCompactionThreadLimiter(compactionThreadLimiter); + assertThat(options.compactionThreadLimiter()).isEqualTo(compactionThreadLimiter); + } + } + + @Test + public void oldDefaults() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { + options.oldDefaults(4, 6); + assertEquals(4 << 20, options.writeBufferSize()); + assertThat(options.compactionPriority()).isEqualTo(CompactionPriority.ByCompensatedSize); + assertThat(options.targetFileSizeBase()).isEqualTo(2 * 1048576); + assertThat(options.maxBytesForLevelBase()).isEqualTo(10 * 1048576); + assertThat(options.softPendingCompactionBytesLimit()).isEqualTo(0); + assertThat(options.hardPendingCompactionBytesLimit()).isEqualTo(0); + assertThat(options.level0StopWritesTrigger()).isEqualTo(24); + } + } + + @Test + public void optimizeForSmallDbWithCache() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions(); + final Cache cache = new LRUCache(1024)) { + assertThat(options.optimizeForSmallDb(cache)).isEqualTo(options); + } + } + + @Test + public void 
cfPaths() throws IOException {
+    try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) {
+      final List<DbPath> paths = Arrays.asList(
+          new DbPath(Paths.get("test1"), 2 << 25), new DbPath(Paths.get("/test2/path"), 2 << 25));
+      assertThat(options.cfPaths()).isEqualTo(Collections.emptyList());
+      assertThat(options.setCfPaths(paths)).isEqualTo(options);
+      assertThat(options.cfPaths()).isEqualTo(paths);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
new file mode 100644
index 000000000..e98327d93
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
@@ -0,0 +1,582 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.*;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class ColumnFamilyTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void columnFamilyDescriptorName() throws RocksDBException {
+    final byte[] cfName = "some_name".getBytes(UTF_8);
+
+    try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
+      final ColumnFamilyDescriptor cfDescriptor =
+          new ColumnFamilyDescriptor(cfName, cfOptions);
+      assertThat(cfDescriptor.getName()).isEqualTo(cfName);
+    }
+  }
+
+  @Test
+  public void columnFamilyDescriptorOptions() throws RocksDBException {
+    final byte[] cfName = "some_name".getBytes(UTF_8);
+
+    try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+        .setCompressionType(CompressionType.BZLIB2_COMPRESSION)) {
+      final ColumnFamilyDescriptor cfDescriptor =
+          new ColumnFamilyDescriptor(cfName, cfOptions);
+
+      assertThat(cfDescriptor.getOptions().compressionType())
+          .isEqualTo(CompressionType.BZLIB2_COMPRESSION);
+    }
+  }
+
+  @Test
+  public void listColumnFamilies() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      // Test listColumnFamilies
+      final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(options,
+          dbFolder.getRoot().getAbsolutePath());
+      assertThat(columnFamilyNames).isNotNull();
+      assertThat(columnFamilyNames.size()).isGreaterThan(0);
+      assertThat(columnFamilyNames.size()).isEqualTo(1);
+      assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
+    }
+  }
+
+  @Test
+  public void defaultColumnFamily() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      final ColumnFamilyHandle cfh = db.getDefaultColumnFamily();
+      try {
+        assertThat(cfh).isNotNull();
+
+        assertThat(cfh.getName()).isEqualTo("default".getBytes(UTF_8));
+        assertThat(cfh.getID()).isEqualTo(0);
+        assertThat(cfh.getDescriptor().getName()).isEqualTo("default".getBytes(UTF_8));
+
+        final byte[] key = "key".getBytes();
+        final byte[] value = "value".getBytes();
+
+        db.put(cfh, key, value);
+
+        final byte[] actualValue = db.get(cfh, key);
+
+        assertThat(cfh).isNotNull();
+        assertThat(actualValue).isEqualTo(value);
+      } finally {
+        cfh.close();
+      }
+    }
+  }
+
+  @Test
+  public void createColumnFamily() throws RocksDBException {
+    final byte[] cfName = "new_cf".getBytes(UTF_8);
+    final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName,
+        new ColumnFamilyOptions());
+
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+
+      final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(cfDescriptor);
+
+      try {
+        assertThat(columnFamilyHandle.getName()).isEqualTo(cfName);
+        assertThat(columnFamilyHandle.getID()).isEqualTo(1);
+
+        final ColumnFamilyDescriptor latestDescriptor = columnFamilyHandle.getDescriptor();
+        assertThat(latestDescriptor.getName()).isEqualTo(cfName);
+
+        final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(
+            options, dbFolder.getRoot().getAbsolutePath());
+        assertThat(columnFamilyNames).isNotNull();
+        assertThat(columnFamilyNames.size()).isGreaterThan(0);
+        assertThat(columnFamilyNames.size()).isEqualTo(2);
+        assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
+        assertThat(new String(columnFamilyNames.get(1))).isEqualTo("new_cf");
+      } finally {
+        columnFamilyHandle.close();
+      }
+    }
+  }
+
+  @Test
+  public void openWithColumnFamilies() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes())
+    );
+
+    final List<ColumnFamilyHandle> columnFamilyHandleList =
+        new ArrayList<>();
+
+    // Test open database with column family names
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)
+             .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(), cfNames,
+             columnFamilyHandleList)) {
+      assertThat(columnFamilyHandleList.size()).isEqualTo(2);
+      db.put("dfkey1".getBytes(), "dfvalue".getBytes());
+      db.put(columnFamilyHandleList.get(0), "dfkey2".getBytes(), "dfvalue".getBytes());
+      db.put(columnFamilyHandleList.get(1), "newcfkey1".getBytes(), "newcfvalue".getBytes());
+
+      String retVal = new String(db.get(columnFamilyHandleList.get(1), "newcfkey1".getBytes()));
+      assertThat(retVal).isEqualTo("newcfvalue");
+      assertThat((db.get(columnFamilyHandleList.get(1), "dfkey1".getBytes()))).isNull();
+      db.delete(columnFamilyHandleList.get(1), "newcfkey1".getBytes());
+      assertThat((db.get(columnFamilyHandleList.get(1), "newcfkey1".getBytes()))).isNull();
+      db.delete(columnFamilyHandleList.get(0), new WriteOptions(), "dfkey2".getBytes());
+      assertThat(db.get(columnFamilyHandleList.get(0), new ReadOptions(), "dfkey2".getBytes()))
+          .isNull();
+    }
+  }
+
+  @Test
+  public void getWithOutValueAndCf() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+
+    // Test open database with column family names
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)
+             .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+             columnFamilyHandleList)) {
+      db.put(columnFamilyHandleList.get(0), new WriteOptions(),
"key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + final byte[] outValue = new byte[5]; + // not found value + int getResult = db.get("keyNotFound".getBytes(), outValue); + assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); + // found value which fits in outValue + getResult = db.get(columnFamilyHandleList.get(0), "key1".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("value".getBytes()); + // found value which fits partially + getResult = + db.get(columnFamilyHandleList.get(0), new ReadOptions(), "key2".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("12345".getBytes()); + } + } + + @Test + public void createWriteDropColumnFamily() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + ColumnFamilyHandle tmpColumnFamilyHandle; + tmpColumnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("tmpCF".getBytes(), new ColumnFamilyOptions())); + db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes()); + db.dropColumnFamily(tmpColumnFamilyHandle); + assertThat(tmpColumnFamilyHandle.isOwningHandle()).isTrue(); + } + } + + @Test + public void createWriteDropColumnFamilies() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + ColumnFamilyHandle tmpColumnFamilyHandle = null; + ColumnFamilyHandle tmpColumnFamilyHandle2 = null; + tmpColumnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("tmpCF".getBytes(), new ColumnFamilyOptions())); + tmpColumnFamilyHandle2 = db.createColumnFamily( + new ColumnFamilyDescriptor("tmpCF2".getBytes(), new ColumnFamilyOptions())); + db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes()); + db.put(tmpColumnFamilyHandle2, "key".getBytes(), "value".getBytes()); + db.dropColumnFamilies(Arrays.asList(tmpColumnFamilyHandle, tmpColumnFamilyHandle2)); + assertThat(tmpColumnFamilyHandle.isOwningHandle()).isTrue(); + assertThat(tmpColumnFamilyHandle2.isOwningHandle()).isTrue(); + } + } + + @Test + public void writeBatch() throws RocksDBException { + try (final StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator)) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, + defaultCfOptions), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final 
+           final RocksDB db = RocksDB.open(options,
+               dbFolder.getRoot().getAbsolutePath(),
+               cfDescriptors, columnFamilyHandleList);
+           final WriteBatch writeBatch = new WriteBatch();
+           final WriteOptions writeOpt = new WriteOptions()) {
+        writeBatch.put("key".getBytes(), "value".getBytes());
+        writeBatch.put(db.getDefaultColumnFamily(), "mergeKey".getBytes(), "merge".getBytes());
+        writeBatch.merge(db.getDefaultColumnFamily(), "mergeKey".getBytes(), "merge".getBytes());
+        writeBatch.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes());
+        writeBatch.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), "value2".getBytes());
+        writeBatch.delete("xyz".getBytes());
+        writeBatch.delete(columnFamilyHandleList.get(1), "xyz".getBytes());
+        db.write(writeOpt, writeBatch);
+
+        assertThat(db.get(columnFamilyHandleList.get(1), "xyz".getBytes())).isNull();
+        assertThat(new String(db.get(columnFamilyHandleList.get(1), "newcfkey".getBytes())))
+            .isEqualTo("value");
+        assertThat(new String(db.get(columnFamilyHandleList.get(1), "newcfkey2".getBytes())))
+            .isEqualTo("value2");
+        assertThat(new String(db.get("key".getBytes()))).isEqualTo("value");
+        // check if key is merged
+        assertThat(new String(db.get(db.getDefaultColumnFamily(), "mergeKey".getBytes())))
+            .isEqualTo("merge,merge");
+      }
+    }
+  }
+
+  @Test
+  public void iteratorOnColumnFamily() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)
+             .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(),
+             cfDescriptors, columnFamilyHandleList)) {
+      db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes());
+      db.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), "value2".getBytes());
+      try (final RocksIterator rocksIterator = db.newIterator(columnFamilyHandleList.get(1))) {
+        rocksIterator.seekToFirst();
+        Map<String, String> refMap = new HashMap<>();
+        refMap.put("newcfkey", "value");
+        refMap.put("newcfkey2", "value2");
+        int i = 0;
+        while (rocksIterator.isValid()) {
+          i++;
+          assertThat(refMap.get(new String(rocksIterator.key())))
+              .isEqualTo(new String(rocksIterator.value()));
+          rocksIterator.next();
+        }
+        assertThat(i).isEqualTo(2);
+      }
+    }
+  }
+
+  @Test
+  public void multiGet() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)
+             .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(),
+             cfDescriptors, columnFamilyHandleList)) {
+      db.put(columnFamilyHandleList.get(0), "key".getBytes(), "value".getBytes());
+      db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes());
+
+      final List<byte[]> keys =
+          Arrays.asList(new byte[][] {"key".getBytes(), "newcfkey".getBytes()});
+
+      List<byte[]> retValues = db.multiGetAsList(columnFamilyHandleList, keys);
+      assertThat(retValues.size()).isEqualTo(2);
+      assertThat(new String(retValues.get(0))).isEqualTo("value");
+      assertThat(new String(retValues.get(1))).isEqualTo("value");
+      retValues = db.multiGetAsList(new ReadOptions(), columnFamilyHandleList, keys);
+      assertThat(retValues.size()).isEqualTo(2);
+      assertThat(new String(retValues.get(0))).isEqualTo("value");
+      assertThat(new String(retValues.get(1))).isEqualTo("value");
+    }
+  }
+
+  @Test
+  public void multiGetAsList() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)
+             .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(),
+             cfDescriptors, columnFamilyHandleList)) {
+      db.put(columnFamilyHandleList.get(0), "key".getBytes(), "value".getBytes());
+      db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), "value".getBytes());
+
+      final List<byte[]> keys =
+          Arrays.asList(new byte[][] {"key".getBytes(), "newcfkey".getBytes()});
+      List<byte[]> retValues = db.multiGetAsList(columnFamilyHandleList, keys);
+      assertThat(retValues.size()).isEqualTo(2);
+      assertThat(new String(retValues.get(0))).isEqualTo("value");
+      assertThat(new String(retValues.get(1))).isEqualTo("value");
+      retValues = db.multiGetAsList(new ReadOptions(), columnFamilyHandleList, keys);
+      assertThat(retValues.size()).isEqualTo(2);
+      assertThat(new String(retValues.get(0))).isEqualTo("value");
+      assertThat(new String(retValues.get(1))).isEqualTo("value");
+    }
+  }
+
+  @Test
+  public void properties() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)
+             .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(),
+             cfDescriptors, columnFamilyHandleList)) {
+      assertThat(db.getProperty("rocksdb.estimate-num-keys")).isNotNull();
+      assertThat(db.getLongProperty(columnFamilyHandleList.get(0), "rocksdb.estimate-num-keys"))
+          .isGreaterThanOrEqualTo(0);
+      assertThat(db.getProperty("rocksdb.stats")).isNotNull();
+      assertThat(db.getProperty(columnFamilyHandleList.get(0), "rocksdb.sstables")).isNotNull();
+      assertThat(db.getProperty(columnFamilyHandleList.get(1), "rocksdb.estimate-num-keys"))
+          .isNotNull();
+      assertThat(db.getProperty(columnFamilyHandleList.get(1), "rocksdb.stats")).isNotNull();
+      assertThat(db.getProperty(columnFamilyHandleList.get(1), "rocksdb.sstables")).isNotNull();
+      assertThat(db.getAggregatedLongProperty("rocksdb.estimate-num-keys")).isNotNull();
+      assertThat(db.getAggregatedLongProperty("rocksdb.estimate-num-keys"))
+          .isGreaterThanOrEqualTo(0);
+    }
+  }
+
+
+  @Test
+  public void iterators() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)
+             .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+             columnFamilyHandleList)) {
+      List<RocksIterator> iterators = null;
+      try {
+        iterators = db.newIterators(columnFamilyHandleList);
+        assertThat(iterators.size()).isEqualTo(2);
+        RocksIterator iter = iterators.get(0);
+        iter.seekToFirst();
+        final Map<String, String> defRefMap = new HashMap<>();
+        defRefMap.put("dfkey1", "dfvalue");
+        defRefMap.put("key", "value");
+        while (iter.isValid()) {
+          assertThat(defRefMap.get(new String(iter.key()))).
+              isEqualTo(new String(iter.value()));
+          iter.next();
+        }
+        // iterate over new_cf key/value pairs
+        final Map<String, String> cfRefMap = new HashMap<>();
+        cfRefMap.put("newcfkey", "value");
+        cfRefMap.put("newcfkey2", "value2");
+        iter = iterators.get(1);
+        iter.seekToFirst();
+        while (iter.isValid()) {
+          assertThat(cfRefMap.get(new String(iter.key()))).
+              isEqualTo(new String(iter.value()));
+          iter.next();
+        }
+      } finally {
+        if (iterators != null) {
+          for (final RocksIterator rocksIterator : iterators) {
+            rocksIterator.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failPutDisposedCF() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(),
+             cfDescriptors, columnFamilyHandleList)) {
+      db.dropColumnFamily(columnFamilyHandleList.get(1));
+      db.put(columnFamilyHandleList.get(1), "key".getBytes(), "value".getBytes());
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failRemoveDisposedCF() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(),
+             cfDescriptors, columnFamilyHandleList)) {
+      db.dropColumnFamily(columnFamilyHandleList.get(1));
+      db.delete(columnFamilyHandleList.get(1), "key".getBytes());
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failGetDisposedCF() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+             columnFamilyHandleList)) {
+      db.dropColumnFamily(columnFamilyHandleList.get(1));
+      db.get(columnFamilyHandleList.get(1), "key".getBytes());
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failMultiGetWithoutCorrectNumberOfCF() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+             columnFamilyHandleList)) {
+      final List<byte[]> keys = new ArrayList<>();
+      keys.add("key".getBytes());
+      keys.add("newcfkey".getBytes());
+      final List<ColumnFamilyHandle> cfCustomList = new ArrayList<>();
+      db.multiGetAsList(cfCustomList, keys);
+    }
+  }
+
+  @Test
+  public void testByteCreateColumnFamily() throws RocksDBException {
+
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())
+    ) {
+      final byte[] b0 = new byte[]{(byte) 0x00};
+      final byte[] b1 = new byte[]{(byte) 0x01};
+      final byte[] b2 = new byte[]{(byte) 0x02};
+      db.createColumnFamily(new ColumnFamilyDescriptor(b0));
+      db.createColumnFamily(new ColumnFamilyDescriptor(b1));
+      final List<byte[]> families =
+          RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath());
+      assertThat(families).contains("default".getBytes(), b0, b1);
+      db.createColumnFamily(new ColumnFamilyDescriptor(b2));
+    }
+  }
+
+  @Test
+  public void testCFNamesWithZeroBytes() throws RocksDBException {
+    ColumnFamilyHandle cf1 = null, cf2 = null;
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath());
+    ) {
+      final byte[] b0 = new byte[] {0, 0};
+      final byte[] b1 = new byte[] {0, 1};
+      cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0));
+      cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1));
+      final List<byte[]> families =
+          RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath());
+      assertThat(families).contains("default".getBytes(), b0, b1);
+    }
+  }
+
+  @Test
+  public void testCFNameSimplifiedChinese() throws RocksDBException {
+    ColumnFamilyHandle columnFamilyHandle = null;
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath());
+    ) {
+      final String simplifiedChinese = "\u7b80\u4f53\u5b57";
+      columnFamilyHandle =
+          db.createColumnFamily(new ColumnFamilyDescriptor(simplifiedChinese.getBytes()));
+
+      final List<byte[]> families =
+          RocksDB.listColumnFamilies(options, dbFolder.getRoot().getAbsolutePath());
+      assertThat(families).contains("default".getBytes(), simplifiedChinese.getBytes());
+    }
+  }
+
+  @Test
+  public void testDestroyColumnFamilyHandle() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());) {
+      final byte[] name1 = "cf1".getBytes();
+      final byte[] name2 = "cf2".getBytes();
+      final ColumnFamilyDescriptor desc1 = new ColumnFamilyDescriptor(name1);
+      final ColumnFamilyDescriptor desc2 = new ColumnFamilyDescriptor(name2);
+      final ColumnFamilyHandle cf1 = db.createColumnFamily(desc1);
+      final ColumnFamilyHandle cf2 = db.createColumnFamily(desc2);
+      assertTrue(cf1.isOwningHandle());
+      assertTrue(cf2.isOwningHandle());
+      assertFalse(cf1.isDefaultColumnFamily());
+      db.destroyColumnFamilyHandle(cf1);
+      // At this point cf1 should not be used!
+      assertFalse(cf1.isOwningHandle());
+      assertTrue(cf2.isOwningHandle());
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
new file mode 100644
index 000000000..18c187ddb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactRangeOptionsTest.java
@@ -0,0 +1,98 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import org.junit.Test; +import org.rocksdb.CompactRangeOptions.BottommostLevelCompaction; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactRangeOptionsTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void exclusiveManualCompaction() { + CompactRangeOptions opt = new CompactRangeOptions(); + boolean value = false; + opt.setExclusiveManualCompaction(value); + assertThat(opt.exclusiveManualCompaction()).isEqualTo(value); + value = true; + opt.setExclusiveManualCompaction(value); + assertThat(opt.exclusiveManualCompaction()).isEqualTo(value); + } + + @Test + public void bottommostLevelCompaction() { + CompactRangeOptions opt = new CompactRangeOptions(); + BottommostLevelCompaction value = BottommostLevelCompaction.kSkip; + opt.setBottommostLevelCompaction(value); + assertThat(opt.bottommostLevelCompaction()).isEqualTo(value); + value = BottommostLevelCompaction.kForce; + opt.setBottommostLevelCompaction(value); + assertThat(opt.bottommostLevelCompaction()).isEqualTo(value); + value = BottommostLevelCompaction.kIfHaveCompactionFilter; + opt.setBottommostLevelCompaction(value); + assertThat(opt.bottommostLevelCompaction()).isEqualTo(value); + } + + @Test + public void changeLevel() { + CompactRangeOptions opt = new CompactRangeOptions(); + boolean value = false; + opt.setChangeLevel(value); + assertThat(opt.changeLevel()).isEqualTo(value); + value = true; + opt.setChangeLevel(value); + assertThat(opt.changeLevel()).isEqualTo(value); + } + + @Test + public void targetLevel() { + CompactRangeOptions opt = new CompactRangeOptions(); + int value = 2; + opt.setTargetLevel(value); + assertThat(opt.targetLevel()).isEqualTo(value); + value = 3; + opt.setTargetLevel(value); + assertThat(opt.targetLevel()).isEqualTo(value); + } + + @Test + public void targetPathId() { + CompactRangeOptions opt = new CompactRangeOptions(); + int value = 2; + opt.setTargetPathId(value); + assertThat(opt.targetPathId()).isEqualTo(value); + value = 3; + opt.setTargetPathId(value); + assertThat(opt.targetPathId()).isEqualTo(value); + } + + @Test + public void allowWriteStall() { + CompactRangeOptions opt = new CompactRangeOptions(); + boolean value = false; + opt.setAllowWriteStall(value); + assertThat(opt.allowWriteStall()).isEqualTo(value); + value = true; + opt.setAllowWriteStall(value); + assertThat(opt.allowWriteStall()).isEqualTo(value); + } + + @Test + public void maxSubcompactions() { + CompactRangeOptions opt = new CompactRangeOptions(); + int value = 2; + opt.setMaxSubcompactions(value); + assertThat(opt.maxSubcompactions()).isEqualTo(value); + value = 3; + opt.setMaxSubcompactions(value); + assertThat(opt.maxSubcompactions()).isEqualTo(value); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java new file mode 100644 index 000000000..35a14eb54 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionFilterFactoryTest.java @@ -0,0 +1,61 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.test.RemoveEmptyValueCompactionFilterFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionFilterFactoryTest {
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void columnFamilyOptions_setCompactionFilterFactory()
+      throws RocksDBException {
+    try(final DBOptions options = new DBOptions()
+        .setCreateIfMissing(true)
+        .setCreateMissingColumnFamilies(true);
+        final RemoveEmptyValueCompactionFilterFactory compactionFilterFactory
+            = new RemoveEmptyValueCompactionFilterFactory();
+        final ColumnFamilyOptions new_cf_opts
+            = new ColumnFamilyOptions()
+            .setCompactionFilterFactory(compactionFilterFactory)) {
+
+      final List<ColumnFamilyDescriptor> cfNames = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+          new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts));
+
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+      try (final RocksDB rocksDb =
+               RocksDB.open(options, dbFolder.getRoot().getAbsolutePath(), cfNames, cfHandles)) {
+        final byte[] key1 = "key1".getBytes();
+        final byte[] key2 = "key2".getBytes();
+
+        final byte[] value1 = "value1".getBytes();
+        final byte[] value2 = new byte[0];
+
+        rocksDb.put(cfHandles.get(1), key1, value1);
+        rocksDb.put(cfHandles.get(1), key2, value2);
+
+        rocksDb.compactRange(cfHandles.get(1));
+
+        assertThat(rocksDb.get(cfHandles.get(1), key1)).isEqualTo(value1);
+        final boolean exists = rocksDb.keyMayExist(cfHandles.get(1), key2, null);
+        assertThat(exists).isFalse();
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java
new file mode 100644
index 000000000..c71b0da16
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobInfoTest.java
@@ -0,0 +1,114 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionJobInfoTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void columnFamilyName() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.columnFamilyName()) + .isEmpty(); + } + } + + @Test + public void status() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.status().getCode()) + .isEqualTo(Status.Code.Ok); + } + } + + @Test + public void threadId() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.threadId()) + .isEqualTo(0); + } + } + + @Test + public void jobId() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.jobId()) + .isEqualTo(0); + } + } + + @Test + public void baseInputLevel() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.baseInputLevel()) + .isEqualTo(0); + } + } + + @Test + public void outputLevel() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.outputLevel()) + .isEqualTo(0); + } + } + + @Test + public void inputFiles() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.inputFiles()) + .isEmpty(); + } + } + + @Test + public void outputFiles() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.outputFiles()) + .isEmpty(); + } + } + + @Test + public void tableProperties() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.tableProperties()) + .isEmpty(); + } + } + + @Test + public void compactionReason() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.compactionReason()) + .isEqualTo(CompactionReason.kUnknown); + } + } + + @Test + public void compression() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.compression()) + .isEqualTo(CompressionType.NO_COMPRESSION); + } + } + + @Test + public void stats() { + try (final CompactionJobInfo compactionJobInfo = new CompactionJobInfo()) { + assertThat(compactionJobInfo.stats()) + .isNotNull(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java new file mode 100644 index 000000000..5c1eb2aab --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionJobStatsTest.java @@ -0,0 +1,196 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionJobStatsTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void reset() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + compactionJobStats.reset(); + assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0); + } + } + + @Test + public void add() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats(); + final CompactionJobStats otherCompactionJobStats = new CompactionJobStats()) { + compactionJobStats.add(otherCompactionJobStats); + } + } + + @Test + public void elapsedMicros() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.elapsedMicros()).isEqualTo(0); + } + } + + @Test + public void numInputRecords() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputRecords()).isEqualTo(0); + } + } + + @Test + public void numInputFiles() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputFiles()).isEqualTo(0); + } + } + + @Test + public void numInputFilesAtOutputLevel() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputFilesAtOutputLevel()).isEqualTo(0); + } + } + + @Test + public void numOutputRecords() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numOutputRecords()).isEqualTo(0); + } + } + + @Test + public void numOutputFiles() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numOutputFiles()).isEqualTo(0); + } + } + + @Test + public void isManualCompaction() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.isManualCompaction()).isFalse(); + } + } + + @Test + public void totalInputBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalInputBytes()).isEqualTo(0); + } + } + + @Test + public void totalOutputBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalOutputBytes()).isEqualTo(0); + } + } + + + @Test + public void numRecordsReplaced() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numRecordsReplaced()).isEqualTo(0); + } + } + + @Test + public void totalInputRawKeyBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalInputRawKeyBytes()).isEqualTo(0); + } + } + + @Test + public void totalInputRawValueBytes() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.totalInputRawValueBytes()).isEqualTo(0); + } + } + + @Test + public void numInputDeletionRecords() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numInputDeletionRecords()).isEqualTo(0); + } + } + + @Test + public void numExpiredDeletionRecords() { + try (final CompactionJobStats compactionJobStats = new 
CompactionJobStats()) { + assertThat(compactionJobStats.numExpiredDeletionRecords()).isEqualTo(0); + } + } + + @Test + public void numCorruptKeys() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numCorruptKeys()).isEqualTo(0); + } + } + + @Test + public void fileWriteNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.fileWriteNanos()).isEqualTo(0); + } + } + + @Test + public void fileRangeSyncNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.fileRangeSyncNanos()).isEqualTo(0); + } + } + + @Test + public void fileFsyncNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.fileFsyncNanos()).isEqualTo(0); + } + } + + @Test + public void filePrepareWriteNanos() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.filePrepareWriteNanos()).isEqualTo(0); + } + } + + @Test + public void smallestOutputKeyPrefix() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.smallestOutputKeyPrefix()).isEmpty(); + } + } + + @Test + public void largestOutputKeyPrefix() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.largestOutputKeyPrefix()).isEmpty(); + } + } + + @Test + public void numSingleDelFallthru() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numSingleDelFallthru()).isEqualTo(0); + } + } + + @Test + public void numSingleDelMismatch() { + try (final CompactionJobStats compactionJobStats = new CompactionJobStats()) { + assertThat(compactionJobStats.numSingleDelMismatch()).isEqualTo(0); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java new file mode 100644 index 000000000..841615e67 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsFIFOTest.java @@ -0,0 +1,35 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+
+package org.rocksdb;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionOptionsFIFOTest {
+
+  static {
+    RocksDB.loadLibrary();
+  }
+
+  @Test
+  public void maxTableFilesSize() {
+    final long size = 500 * 1024 * 1024;
+    try (final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) {
+      opt.setMaxTableFilesSize(size);
+      assertThat(opt.maxTableFilesSize()).isEqualTo(size);
+    }
+  }
+
+  @Test
+  public void allowCompaction() {
+    final boolean allowCompaction = true;
+    try (final CompactionOptionsFIFO opt = new CompactionOptionsFIFO()) {
+      opt.setAllowCompaction(allowCompaction);
+      assertThat(opt.allowCompaction()).isEqualTo(allowCompaction);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java
new file mode 100644
index 000000000..9b7d79694
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsTest.java
@@ -0,0 +1,52 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CompactionOptionsTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Test
+  public void compression() {
+    try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+      assertThat(compactionOptions.compression())
+          .isEqualTo(CompressionType.SNAPPY_COMPRESSION);
+      compactionOptions.setCompression(CompressionType.NO_COMPRESSION);
+      assertThat(compactionOptions.compression())
+          .isEqualTo(CompressionType.NO_COMPRESSION);
+    }
+  }
+
+  @Test
+  public void outputFileSizeLimit() {
+    final long mb250 = 1024 * 1024 * 250;
+    try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+      assertThat(compactionOptions.outputFileSizeLimit())
+          .isEqualTo(-1);
+      compactionOptions.setOutputFileSizeLimit(mb250);
+      assertThat(compactionOptions.outputFileSizeLimit())
+          .isEqualTo(mb250);
+    }
+  }
+
+  @Test
+  public void maxSubcompactions() {
+    try (final CompactionOptions compactionOptions = new CompactionOptions()) {
+      assertThat(compactionOptions.maxSubcompactions())
+          .isEqualTo(0);
+      compactionOptions.setMaxSubcompactions(9);
+      assertThat(compactionOptions.maxSubcompactions())
+          .isEqualTo(9);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
new file mode 100644
index 000000000..5e2d195b6
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionOptionsUniversalTest.java
@@ -0,0 +1,80 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionOptionsUniversalTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void sizeRatio() { + final int sizeRatio = 4; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setSizeRatio(sizeRatio); + assertThat(opt.sizeRatio()).isEqualTo(sizeRatio); + } + } + + @Test + public void minMergeWidth() { + final int minMergeWidth = 3; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setMinMergeWidth(minMergeWidth); + assertThat(opt.minMergeWidth()).isEqualTo(minMergeWidth); + } + } + + @Test + public void maxMergeWidth() { + final int maxMergeWidth = Integer.MAX_VALUE - 1234; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setMaxMergeWidth(maxMergeWidth); + assertThat(opt.maxMergeWidth()).isEqualTo(maxMergeWidth); + } + } + + @Test + public void maxSizeAmplificationPercent() { + final int maxSizeAmplificationPercent = 150; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setMaxSizeAmplificationPercent(maxSizeAmplificationPercent); + assertThat(opt.maxSizeAmplificationPercent()).isEqualTo(maxSizeAmplificationPercent); + } + } + + @Test + public void compressionSizePercent() { + final int compressionSizePercent = 500; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setCompressionSizePercent(compressionSizePercent); + assertThat(opt.compressionSizePercent()).isEqualTo(compressionSizePercent); + } + } + + @Test + public void stopStyle() { + final CompactionStopStyle stopStyle = CompactionStopStyle.CompactionStopStyleSimilarSize; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setStopStyle(stopStyle); + assertThat(opt.stopStyle()).isEqualTo(stopStyle); + } + } + + @Test + public void allowTrivialMove() { + final boolean allowTrivialMove = true; + try(final CompactionOptionsUniversal opt = new CompactionOptionsUniversal()) { + opt.setAllowTrivialMove(allowTrivialMove); + assertThat(opt.allowTrivialMove()).isEqualTo(allowTrivialMove); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java new file mode 100644 index 000000000..b078e132f --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionPriorityTest.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionPriorityTest { + + @Test(expected = IllegalArgumentException.class) + public void failIfIllegalByteValueProvided() { + CompactionPriority.getCompactionPriority((byte) -1); + } + + @Test + public void getCompactionPriority() { + assertThat(CompactionPriority.getCompactionPriority( + CompactionPriority.OldestLargestSeqFirst.getValue())) + .isEqualTo(CompactionPriority.OldestLargestSeqFirst); + } + + @Test + public void valueOf() { + assertThat(CompactionPriority.valueOf("OldestSmallestSeqFirst")). 
+ isEqualTo(CompactionPriority.OldestSmallestSeqFirst); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java new file mode 100644 index 000000000..4c8a20950 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompactionStopStyleTest.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompactionStopStyleTest { + + @Test(expected = IllegalArgumentException.class) + public void failIfIllegalByteValueProvided() { + CompactionStopStyle.getCompactionStopStyle((byte) -1); + } + + @Test + public void getCompactionStopStyle() { + assertThat(CompactionStopStyle.getCompactionStopStyle( + CompactionStopStyle.CompactionStopStyleTotalSize.getValue())) + .isEqualTo(CompactionStopStyle.CompactionStopStyleTotalSize); + } + + @Test + public void valueOf() { + assertThat(CompactionStopStyle.valueOf("CompactionStopStyleSimilarSize")). + isEqualTo(CompactionStopStyle.CompactionStopStyleSimilarSize); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java new file mode 100644 index 000000000..3e90b9f10 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java @@ -0,0 +1,58 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
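+// Round-trip tests for ComparatorOptions, which controls how Java comparator callbacks are invoked.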
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ComparatorOptionsTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void reusedSynchronisationType() { + try(final ComparatorOptions copt = new ComparatorOptions()) { + + copt.setReusedSynchronisationType(ReusedSynchronisationType.MUTEX); + assertThat(copt.reusedSynchronisationType()) + .isEqualTo(ReusedSynchronisationType.MUTEX); + + copt.setReusedSynchronisationType(ReusedSynchronisationType.ADAPTIVE_MUTEX); + assertThat(copt.reusedSynchronisationType()) + .isEqualTo(ReusedSynchronisationType.ADAPTIVE_MUTEX); + + copt.setReusedSynchronisationType(ReusedSynchronisationType.THREAD_LOCAL); + assertThat(copt.reusedSynchronisationType()) + .isEqualTo(ReusedSynchronisationType.THREAD_LOCAL); + } + } + + @Test + public void useDirectBuffer() { + try(final ComparatorOptions copt = new ComparatorOptions()) { + copt.setUseDirectBuffer(true); + assertThat(copt.useDirectBuffer()).isTrue(); + + copt.setUseDirectBuffer(false); + assertThat(copt.useDirectBuffer()).isFalse(); + } + } + + @Test + public void maxReusedBufferSize() { + try(final ComparatorOptions copt = new ComparatorOptions()) { + copt.setMaxReusedBufferSize(12345); + assertThat(copt.maxReusedBufferSize()).isEqualTo(12345); + + copt.setMaxReusedBufferSize(-1); + assertThat(copt.maxReusedBufferSize()).isEqualTo(-1); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java new file mode 100644 index 000000000..116552c32 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java @@ -0,0 +1,71 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
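+// Round-trip tests for CompressionOptions: window bits, level, strategy and dictionary sizes.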
+ +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompressionOptionsTest { + + static { + RocksDB.loadLibrary(); + } + + @Test + public void windowBits() { + final int windowBits = 7; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setWindowBits(windowBits); + assertThat(opt.windowBits()).isEqualTo(windowBits); + } + } + + @Test + public void level() { + final int level = 6; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setLevel(level); + assertThat(opt.level()).isEqualTo(level); + } + } + + @Test + public void strategy() { + final int strategy = 2; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setStrategy(strategy); + assertThat(opt.strategy()).isEqualTo(strategy); + } + } + + @Test + public void maxDictBytes() { + final int maxDictBytes = 999; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setMaxDictBytes(maxDictBytes); + assertThat(opt.maxDictBytes()).isEqualTo(maxDictBytes); + } + } + + @Test + public void zstdMaxTrainBytes() { + final int zstdMaxTrainBytes = 999; + try(final CompressionOptions opt = new CompressionOptions()) { + opt.setZStdMaxTrainBytes(zstdMaxTrainBytes); + assertThat(opt.zstdMaxTrainBytes()).isEqualTo(zstdMaxTrainBytes); + } + } + + @Test + public void enabled() { + try(final CompressionOptions opt = new CompressionOptions()) { + assertThat(opt.enabled()).isFalse(); + opt.setEnabled(true); + assertThat(opt.enabled()).isTrue(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java new file mode 100644 index 000000000..e26cc0aca --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/CompressionTypesTest.java @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CompressionTypesTest { + @Test + public void getCompressionType() { + for (final CompressionType compressionType : CompressionType.values()) { + final String libraryName = compressionType.getLibraryName(); + // the lookup result was previously computed and discarded; assert the + // round-trip for every type that actually has a backing library name + if (libraryName != null) { + assertThat(CompressionType.getCompressionType(libraryName)) + .isEqualTo(compressionType); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java new file mode 100644 index 000000000..165f4f24c --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/ConcurrentTaskLimiterTest.java @@ -0,0 +1,56 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
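+// Tests for ConcurrentTaskLimiter, a named limiter that caps concurrent background work and reports its outstanding task count.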
+ +package org.rocksdb; + +import static org.junit.Assert.assertEquals; + +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; + +public class ConcurrentTaskLimiterTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + private static final String NAME = "name"; + + private ConcurrentTaskLimiter concurrentTaskLimiter; + + @Before + public void beforeTest() { + concurrentTaskLimiter = new ConcurrentTaskLimiterImpl(NAME, 3); + } + + @Test + public void name() { + assertEquals(NAME, concurrentTaskLimiter.name()); + } + + @Test + public void outstandingTask() { + assertEquals(0, concurrentTaskLimiter.outstandingTask()); + } + + @Test + public void setMaxOutstandingTask() { + assertEquals(concurrentTaskLimiter, concurrentTaskLimiter.setMaxOutstandingTask(4)); + assertEquals(0, concurrentTaskLimiter.outstandingTask()); + } + + @Test + public void resetMaxOutstandingTask() { + assertEquals(concurrentTaskLimiter, concurrentTaskLimiter.resetMaxOutstandingTask()); + assertEquals(0, concurrentTaskLimiter.outstandingTask()); + } + + @After + public void afterTest() { + concurrentTaskLimiter.close(); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java new file mode 100644 index 000000000..d55ceebcf --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java @@ -0,0 +1,904 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.nio.file.Paths; +import java.util.*; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.ClassRule; +import org.junit.Test; + +public class DBOptionsTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + public static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void copyConstructor() { + DBOptions origOpts = new DBOptions(); + origOpts.setCreateIfMissing(rand.nextBoolean()); + origOpts.setAllow2pc(rand.nextBoolean()); + origOpts.setMaxBackgroundJobs(rand.nextInt(10)); + DBOptions copyOpts = new DBOptions(origOpts); + assertThat(origOpts.createIfMissing()).isEqualTo(copyOpts.createIfMissing()); + assertThat(origOpts.allow2pc()).isEqualTo(copyOpts.allow2pc()); + } + + @Test + public void getDBOptionsFromProps() { + // setup sample properties + final Properties properties = new Properties(); + properties.put("allow_mmap_reads", "true"); + properties.put("bytes_per_sync", "13"); + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) { + assertThat(opt).isNotNull(); + assertThat(String.valueOf(opt.allowMmapReads())). + isEqualTo(properties.get("allow_mmap_reads")); + assertThat(String.valueOf(opt.bytesPerSync())). 
+ isEqualTo(properties.get("bytes_per_sync")); + } + } + + @Test + public void failDBOptionsFromPropsWithIllegalValue() { + // setup sample properties + final Properties properties = new Properties(); + properties.put("tomato", "1024"); + properties.put("burger", "2"); + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) { + assertThat(opt).isNull(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void failDBOptionsFromPropsWithNullValue() { + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(null)) { + //no-op + } + } + + @Test(expected = IllegalArgumentException.class) + public void failDBOptionsFromPropsWithEmptyProps() { + try(final DBOptions opt = DBOptions.getDBOptionsFromProps( + new Properties())) { + //no-op + } + } + + @Test + public void linkageOfPrepMethods() { + try (final DBOptions opt = new DBOptions()) { + opt.optimizeForSmallDb(); + } + } + + @Test + public void env() { + try (final DBOptions opt = new DBOptions(); + final Env env = Env.getDefault()) { + opt.setEnv(env); + assertThat(opt.getEnv()).isSameAs(env); + } + } + + @Test + public void setIncreaseParallelism() { + try(final DBOptions opt = new DBOptions()) { + final int threads = Runtime.getRuntime().availableProcessors() * 2; + opt.setIncreaseParallelism(threads); + } + } + + @Test + public void createIfMissing() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setCreateIfMissing(boolValue); + assertThat(opt.createIfMissing()).isEqualTo(boolValue); + } + } + + @Test + public void createMissingColumnFamilies() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setCreateMissingColumnFamilies(boolValue); + assertThat(opt.createMissingColumnFamilies()).isEqualTo(boolValue); + } + } + + @Test + public void errorIfExists() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setErrorIfExists(boolValue); + assertThat(opt.errorIfExists()).isEqualTo(boolValue); + } + } + + @Test + public void paranoidChecks() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setParanoidChecks(boolValue); + assertThat(opt.paranoidChecks()).isEqualTo(boolValue); + } + } + + @Test + public void maxTotalWalSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxTotalWalSize(longValue); + assertThat(opt.maxTotalWalSize()).isEqualTo(longValue); + } + } + + @Test + public void maxOpenFiles() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxOpenFiles(intValue); + assertThat(opt.maxOpenFiles()).isEqualTo(intValue); + } + } + + @Test + public void maxFileOpeningThreads() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxFileOpeningThreads(intValue); + assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue); + } + } + + @Test + public void useFsync() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseFsync(boolValue); + assertThat(opt.useFsync()).isEqualTo(boolValue); + } + } + + @Test + public void dbPaths() { + final List<DbPath> dbPaths = new ArrayList<>(); + dbPaths.add(new DbPath(Paths.get("/a"), 10)); + dbPaths.add(new DbPath(Paths.get("/b"), 100)); + dbPaths.add(new DbPath(Paths.get("/c"), 1000)); + + try(final DBOptions opt = new DBOptions()) { +
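+      // a freshly constructed DBOptions reports an empty db_paths list by default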
assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList()); + + opt.setDbPaths(dbPaths); + + assertThat(opt.dbPaths()).isEqualTo(dbPaths); + } + } + + @Test + public void dbLogDir() { + try(final DBOptions opt = new DBOptions()) { + final String str = "path/to/DbLogDir"; + opt.setDbLogDir(str); + assertThat(opt.dbLogDir()).isEqualTo(str); + } + } + + @Test + public void walDir() { + try(final DBOptions opt = new DBOptions()) { + final String str = "path/to/WalDir"; + opt.setWalDir(str); + assertThat(opt.walDir()).isEqualTo(str); + } + } + + @Test + public void deleteObsoleteFilesPeriodMicros() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setDeleteObsoleteFilesPeriodMicros(longValue); + assertThat(opt.deleteObsoleteFilesPeriodMicros()).isEqualTo(longValue); + } + } + + @SuppressWarnings("deprecation") + @Test + public void maxBackgroundCompactions() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundCompactions(intValue); + assertThat(opt.maxBackgroundCompactions()).isEqualTo(intValue); + } + } + + @Test + public void maxSubcompactions() { + try (final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxSubcompactions(intValue); + assertThat(opt.maxSubcompactions()). + isEqualTo(intValue); + } + } + + @SuppressWarnings("deprecation") + @Test + public void maxBackgroundFlushes() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundFlushes(intValue); + assertThat(opt.maxBackgroundFlushes()).isEqualTo(intValue); + } + } + + @Test + public void maxBackgroundJobs() { + try (final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundJobs(intValue); + assertThat(opt.maxBackgroundJobs()).isEqualTo(intValue); + } + } + + @Test + public void maxLogFileSize() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxLogFileSize(longValue); + assertThat(opt.maxLogFileSize()).isEqualTo(longValue); + } + } + + @Test + public void logFileTimeToRoll() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setLogFileTimeToRoll(longValue); + assertThat(opt.logFileTimeToRoll()).isEqualTo(longValue); + } + } + + @Test + public void keepLogFileNum() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setKeepLogFileNum(longValue); + assertThat(opt.keepLogFileNum()).isEqualTo(longValue); + } + } + + @Test + public void recycleLogFileNum() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setRecycleLogFileNum(longValue); + assertThat(opt.recycleLogFileNum()).isEqualTo(longValue); + } + } + + @Test + public void maxManifestFileSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxManifestFileSize(longValue); + assertThat(opt.maxManifestFileSize()).isEqualTo(longValue); + } + } + + @Test + public void tableCacheNumshardbits() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setTableCacheNumshardbits(intValue); + assertThat(opt.tableCacheNumshardbits()).isEqualTo(intValue); + } + } + + @Test + public void walSizeLimitMB() { + try(final DBOptions opt = new DBOptions()) { + final long longValue =
rand.nextLong(); + opt.setWalSizeLimitMB(longValue); + assertThat(opt.walSizeLimitMB()).isEqualTo(longValue); + } + } + + @Test + public void walTtlSeconds() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWalTtlSeconds(longValue); + assertThat(opt.walTtlSeconds()).isEqualTo(longValue); + } + } + + @Test + public void manifestPreallocationSize() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setManifestPreallocationSize(longValue); + assertThat(opt.manifestPreallocationSize()).isEqualTo(longValue); + } + } + + @Test + public void useDirectReads() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseDirectReads(boolValue); + assertThat(opt.useDirectReads()).isEqualTo(boolValue); + } + } + + @Test + public void useDirectIoForFlushAndCompaction() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseDirectIoForFlushAndCompaction(boolValue); + assertThat(opt.useDirectIoForFlushAndCompaction()).isEqualTo(boolValue); + } + } + + @Test + public void allowFAllocate() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowFAllocate(boolValue); + assertThat(opt.allowFAllocate()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapReads() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapReads(boolValue); + assertThat(opt.allowMmapReads()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapWrites() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapWrites(boolValue); + assertThat(opt.allowMmapWrites()).isEqualTo(boolValue); + } + } + + @Test + public void isFdCloseOnExec() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setIsFdCloseOnExec(boolValue); + assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue); + } + } + + @Test + public void statsDumpPeriodSec() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setStatsDumpPeriodSec(intValue); + assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue); + } + } + + @Test + public void statsPersistPeriodSec() { + try (final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setStatsPersistPeriodSec(intValue); + assertThat(opt.statsPersistPeriodSec()).isEqualTo(intValue); + } + } + + @Test + public void statsHistoryBufferSize() { + try (final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setStatsHistoryBufferSize(longValue); + assertThat(opt.statsHistoryBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void adviseRandomOnOpen() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAdviseRandomOnOpen(boolValue); + assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue); + } + } + + @Test + public void dbWriteBufferSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setDbWriteBufferSize(longValue); + assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void setWriteBufferManager() throws RocksDBException { + try (final DBOptions opt = new DBOptions(); + final Cache cache = new LRUCache(1 * 1024 * 1024); + final 
WriteBufferManager writeBufferManager = new WriteBufferManager(2000L, cache)) { + opt.setWriteBufferManager(writeBufferManager); + assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager); + } + } + + @Test + public void setWriteBufferManagerWithZeroBufferSize() throws RocksDBException { + try (final DBOptions opt = new DBOptions(); + final Cache cache = new LRUCache(1 * 1024 * 1024); + final WriteBufferManager writeBufferManager = new WriteBufferManager(0L, cache)) { + opt.setWriteBufferManager(writeBufferManager); + assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager); + } + } + + @Test + public void accessHintOnCompactionStart() { + try(final DBOptions opt = new DBOptions()) { + final AccessHint accessHint = AccessHint.SEQUENTIAL; + opt.setAccessHintOnCompactionStart(accessHint); + assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint); + } + } + + @Test + public void compactionReadaheadSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setCompactionReadaheadSize(longValue); + assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue); + } + } + + @Test + public void randomAccessMaxBufferSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setRandomAccessMaxBufferSize(longValue); + assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void writableFileMaxBufferSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWritableFileMaxBufferSize(longValue); + assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void useAdaptiveMutex() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseAdaptiveMutex(boolValue); + assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue); + } + } + + @Test + public void bytesPerSync() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setBytesPerSync(longValue); + assertThat(opt.bytesPerSync()).isEqualTo(longValue); + } + } + + @Test + public void walBytesPerSync() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWalBytesPerSync(longValue); + assertThat(opt.walBytesPerSync()).isEqualTo(longValue); + } + } + + @Test + public void strictBytesPerSync() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.strictBytesPerSync()).isFalse(); + opt.setStrictBytesPerSync(true); + assertThat(opt.strictBytesPerSync()).isTrue(); + } + } + + @Test + public void enableThreadTracking() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setEnableThreadTracking(boolValue); + assertThat(opt.enableThreadTracking()).isEqualTo(boolValue); + } + } + + @Test + public void delayedWriteRate() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setDelayedWriteRate(longValue); + assertThat(opt.delayedWriteRate()).isEqualTo(longValue); + } + } + + @Test + public void enablePipelinedWrite() { + try(final DBOptions opt = new DBOptions()) { + assertThat(opt.enablePipelinedWrite()).isFalse(); + opt.setEnablePipelinedWrite(true); + assertThat(opt.enablePipelinedWrite()).isTrue(); + } + } + + @Test + public void unorderedWrite() { + try(final DBOptions opt = new DBOptions()) { + assertThat(opt.unorderedWrite()).isFalse(); + opt.setUnorderedWrite(true); +
assertThat(opt.unorderedWrite()).isTrue(); + } + } + + @Test + public void allowConcurrentMemtableWrite() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowConcurrentMemtableWrite(boolValue); + assertThat(opt.allowConcurrentMemtableWrite()).isEqualTo(boolValue); + } + } + + @Test + public void enableWriteThreadAdaptiveYield() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setEnableWriteThreadAdaptiveYield(boolValue); + assertThat(opt.enableWriteThreadAdaptiveYield()).isEqualTo(boolValue); + } + } + + @Test + public void writeThreadMaxYieldUsec() { + try (final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWriteThreadMaxYieldUsec(longValue); + assertThat(opt.writeThreadMaxYieldUsec()).isEqualTo(longValue); + } + } + + @Test + public void writeThreadSlowYieldUsec() { + try (final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWriteThreadSlowYieldUsec(longValue); + assertThat(opt.writeThreadSlowYieldUsec()).isEqualTo(longValue); + } + } + + @Test + public void skipStatsUpdateOnDbOpen() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setSkipStatsUpdateOnDbOpen(boolValue); + assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue); + } + } + + @Test + public void walRecoveryMode() { + try (final DBOptions opt = new DBOptions()) { + for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) { + opt.setWalRecoveryMode(walRecoveryMode); + assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode); + } + } + } + + @Test + public void allow2pc() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllow2pc(boolValue); + assertThat(opt.allow2pc()).isEqualTo(boolValue); + } + } + + @Test + public void rowCache() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.rowCache()).isNull(); + + try(final Cache lruCache = new LRUCache(1000)) { + opt.setRowCache(lruCache); + assertThat(opt.rowCache()).isEqualTo(lruCache); + } + + try(final Cache clockCache = new ClockCache(1000)) { + opt.setRowCache(clockCache); + assertThat(opt.rowCache()).isEqualTo(clockCache); + } + } + } + + @Test + public void walFilter() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.walFilter()).isNull(); + + try (final AbstractWalFilter walFilter = new AbstractWalFilter() { + @Override + public void columnFamilyLogNumberMap( + final Map<Integer, Long> cfLognumber, + final Map<String, Integer> cfNameId) { + // no-op + } + + @Override + public LogRecordFoundResult logRecordFound(final long logNumber, + final String logFileName, final WriteBatch batch, + final WriteBatch newBatch) { + return new LogRecordFoundResult( + WalProcessingOption.CONTINUE_PROCESSING, false); + } + + @Override + public String name() { + return "test-wal-filter"; + } + }) { + opt.setWalFilter(walFilter); + assertThat(opt.walFilter()).isEqualTo(walFilter); + } + } + } + + @Test + public void failIfOptionsFileError() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setFailIfOptionsFileError(boolValue); + assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue); + } + } + + @Test + public void dumpMallocStats() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDumpMallocStats(boolValue); +
assertThat(opt.dumpMallocStats()).isEqualTo(boolValue); + } + } + + @Test + public void avoidFlushDuringRecovery() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAvoidFlushDuringRecovery(boolValue); + assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue); + } + } + + @Test + public void avoidFlushDuringShutdown() { + try (final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAvoidFlushDuringShutdown(boolValue); + assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue); + } + } + + @Test + public void allowIngestBehind() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.allowIngestBehind()).isFalse(); + opt.setAllowIngestBehind(true); + assertThat(opt.allowIngestBehind()).isTrue(); + } + } + + @Test + public void twoWriteQueues() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.twoWriteQueues()).isFalse(); + opt.setTwoWriteQueues(true); + assertThat(opt.twoWriteQueues()).isTrue(); + } + } + + @Test + public void manualWalFlush() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.manualWalFlush()).isFalse(); + opt.setManualWalFlush(true); + assertThat(opt.manualWalFlush()).isTrue(); + } + } + + @Test + public void atomicFlush() { + try (final DBOptions opt = new DBOptions()) { + assertThat(opt.atomicFlush()).isFalse(); + opt.setAtomicFlush(true); + assertThat(opt.atomicFlush()).isTrue(); + } + } + + @Test + public void rateLimiter() { + try(final DBOptions options = new DBOptions(); + final DBOptions anotherOptions = new DBOptions(); + final RateLimiter rateLimiter = new RateLimiter(1000, 100 * 1000, 1)) { + options.setRateLimiter(rateLimiter); + // Test with parameter initialization + anotherOptions.setRateLimiter( + new RateLimiter(1000)); + } + } + + @Test + public void sstFileManager() throws RocksDBException { + try (final DBOptions options = new DBOptions(); + final SstFileManager sstFileManager = + new SstFileManager(Env.getDefault())) { + options.setSstFileManager(sstFileManager); + } + } + + @Test + public void statistics() { + try(final DBOptions options = new DBOptions()) { + final Statistics statistics = options.statistics(); + assertThat(statistics).isNull(); + } + + try(final Statistics statistics = new Statistics(); + final DBOptions options = new DBOptions().setStatistics(statistics); + final Statistics stats = options.statistics()) { + assertThat(stats).isNotNull(); + } + } + + @Test + public void avoidUnnecessaryBlockingIO() { + try (final DBOptions options = new DBOptions()) { + assertThat(options.avoidUnnecessaryBlockingIO()).isEqualTo(false); + assertThat(options.setAvoidUnnecessaryBlockingIO(true)).isEqualTo(options); + assertThat(options.avoidUnnecessaryBlockingIO()).isEqualTo(true); + } + } + + @Test + public void persistStatsToDisk() { + try (final DBOptions options = new DBOptions()) { + assertThat(options.persistStatsToDisk()).isEqualTo(false); + assertThat(options.setPersistStatsToDisk(true)).isEqualTo(options); + assertThat(options.persistStatsToDisk()).isEqualTo(true); + } + } + + @Test + public void writeDbidToManifest() { + try (final DBOptions options = new DBOptions()) { + assertThat(options.writeDbidToManifest()).isEqualTo(false); + assertThat(options.setWriteDbidToManifest(true)).isEqualTo(options); + assertThat(options.writeDbidToManifest()).isEqualTo(true); + } + } + + @Test + public void logReadaheadSize() { + try (final DBOptions options = new DBOptions()) { + 
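+      // log_readahead_size defaults to 0, i.e. no readahead when reading log files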
assertThat(options.logReadaheadSize()).isEqualTo(0); + final int size = 1024 * 1024 * 100; + assertThat(options.setLogReadaheadSize(size)).isEqualTo(options); + assertThat(options.logReadaheadSize()).isEqualTo(size); + } + } + + @Test + public void bestEffortsRecovery() { + try (final DBOptions options = new DBOptions()) { + assertThat(options.bestEffortsRecovery()).isEqualTo(false); + assertThat(options.setBestEffortsRecovery(true)).isEqualTo(options); + assertThat(options.bestEffortsRecovery()).isEqualTo(true); + } + } + + @Test + public void maxBgerrorResumeCount() { + try (final DBOptions options = new DBOptions()) { + final int INT_MAX = 2147483647; + assertThat(options.maxBgerrorResumeCount()).isEqualTo(INT_MAX); + assertThat(options.setMaxBgErrorResumeCount(-1)).isEqualTo(options); + assertThat(options.maxBgerrorResumeCount()).isEqualTo(-1); + } + } + + @Test + public void bgerrorResumeRetryInterval() { + try (final DBOptions options = new DBOptions()) { + assertThat(options.bgerrorResumeRetryInterval()).isEqualTo(1000000); + final long newRetryInterval = 24 * 3600 * 1000000L; + assertThat(options.setBgerrorResumeRetryInterval(newRetryInterval)).isEqualTo(options); + assertThat(options.bgerrorResumeRetryInterval()).isEqualTo(newRetryInterval); + } + } + + @Test + public void maxWriteBatchGroupSizeBytes() { + try (final DBOptions options = new DBOptions()) { + assertThat(options.maxWriteBatchGroupSizeBytes()).isEqualTo(1024 * 1024); + final long size = 1024 * 1024 * 1024 * 10L; + assertThat(options.setMaxWriteBatchGroupSizeBytes(size)).isEqualTo(options); + assertThat(options.maxWriteBatchGroupSizeBytes()).isEqualTo(size); + } + } + + @Test + public void skipCheckingSstFileSizesOnDbOpen() { + try (final DBOptions options = new DBOptions()) { + assertThat(options.skipCheckingSstFileSizesOnDbOpen()).isEqualTo(false); + assertThat(options.setSkipCheckingSstFileSizesOnDbOpen(true)).isEqualTo(options); + assertThat(options.skipCheckingSstFileSizesOnDbOpen()).isEqualTo(true); + } + } + + @Test + public void eventListeners() { + final AtomicBoolean wasCalled1 = new AtomicBoolean(); + final AtomicBoolean wasCalled2 = new AtomicBoolean(); + try (final DBOptions options = new DBOptions(); + final AbstractEventListener el1 = + new AbstractEventListener() { + @Override + public void onTableFileDeleted(final TableFileDeletionInfo tableFileDeletionInfo) { + wasCalled1.set(true); + } + }; + final AbstractEventListener el2 = + new AbstractEventListener() { + @Override + public void onMemTableSealed(final MemTableInfo memTableInfo) { + wasCalled2.set(true); + } + }) { + assertThat(options.setListeners(Arrays.asList(el1, el2))).isEqualTo(options); + List<AbstractEventListener> listeners = options.listeners(); + assertEquals(el1, listeners.get(0)); + assertEquals(el2, listeners.get(1)); + options.setListeners(Collections.emptyList()); + listeners.get(0).onTableFileDeleted(null); + assertTrue(wasCalled1.get()); + listeners.get(1).onMemTableSealed(null); + assertTrue(wasCalled2.get()); + List<AbstractEventListener> listeners2 = options.listeners(); + assertNotNull(listeners2); + assertEquals(0, listeners2.size()); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java new file mode 100644 index 000000000..3fb563ecb --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/DefaultEnvTest.java @@ -0,0 +1,113 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.Collection; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DefaultEnvTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void backgroundThreads() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.setBackgroundThreads(5, Priority.BOTTOM); + assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isEqualTo(5); + + defaultEnv.setBackgroundThreads(5); + assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5); + + defaultEnv.setBackgroundThreads(5, Priority.LOW); + assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isEqualTo(5); + + defaultEnv.setBackgroundThreads(5, Priority.HIGH); + assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isEqualTo(5); + } + } + + @Test + public void threadPoolQueueLen() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + assertThat(defaultEnv.getThreadPoolQueueLen(Priority.BOTTOM)).isEqualTo(0); + assertThat(defaultEnv.getThreadPoolQueueLen(Priority.LOW)).isEqualTo(0); + assertThat(defaultEnv.getThreadPoolQueueLen(Priority.HIGH)).isEqualTo(0); + } + } + + @Test + public void incBackgroundThreadsIfNeeded() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.BOTTOM); + assertThat(defaultEnv.getBackgroundThreads(Priority.BOTTOM)).isGreaterThanOrEqualTo(20); + + defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.LOW); + assertThat(defaultEnv.getBackgroundThreads(Priority.LOW)).isGreaterThanOrEqualTo(20); + + defaultEnv.incBackgroundThreadsIfNeeded(20, Priority.HIGH); + assertThat(defaultEnv.getBackgroundThreads(Priority.HIGH)).isGreaterThanOrEqualTo(20); + } + } + + @Test + public void lowerThreadPoolIOPriority() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.lowerThreadPoolIOPriority(Priority.BOTTOM); + + defaultEnv.lowerThreadPoolIOPriority(Priority.LOW); + + defaultEnv.lowerThreadPoolIOPriority(Priority.HIGH); + } + } + + @Test + public void lowerThreadPoolCPUPriority() { + try (final Env defaultEnv = RocksEnv.getDefault()) { + defaultEnv.lowerThreadPoolCPUPriority(Priority.BOTTOM); + + defaultEnv.lowerThreadPoolCPUPriority(Priority.LOW); + + defaultEnv.lowerThreadPoolCPUPriority(Priority.HIGH); + } + } + + @Test + public void threadList() throws RocksDBException { + try (final Env defaultEnv = RocksEnv.getDefault()) { + final Collection<ThreadStatus> threadList = defaultEnv.getThreadList(); + assertThat(threadList.size()).isGreaterThan(0); + } + } + + @Test + public void threadList_integration() throws RocksDBException { + try (final Env env = RocksEnv.getDefault(); + final Options opt = new Options() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true) + .setEnv(env)) { + // open database + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + final List<ThreadStatus> threadList = env.getThreadList(); + assertThat(threadList.size()).isGreaterThan(0); + } + } + } +} diff --git
a/src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java new file mode 100644 index 000000000..67385345c --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java @@ -0,0 +1,93 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import java.nio.ByteBuffer; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DirectSliceTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void directSlice() { + try(final DirectSlice directSlice = new DirectSlice("abc"); + final DirectSlice otherSlice = new DirectSlice("abc")) { + assertThat(directSlice.toString()).isEqualTo("abc"); + // clear first slice + directSlice.clear(); + assertThat(directSlice.toString()).isEmpty(); + // get first char in otherSlice + assertThat(otherSlice.get(0)).isEqualTo("a".getBytes()[0]); + // remove prefix + otherSlice.removePrefix(1); + assertThat(otherSlice.toString()).isEqualTo("bc"); + } + } + + @Test + public void directSliceWithByteBuffer() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1); + buffer.put(data); + buffer.put(data.length, (byte)0); + + try(final DirectSlice directSlice = new DirectSlice(buffer)) { + assertThat(directSlice.toString()).isEqualTo("Some text"); + } + } + + @Test + public void directSliceWithByteBufferAndLength() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length); + buffer.put(data); + try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) { + assertThat(directSlice.toString()).isEqualTo("Some"); + } + } + + @Test(expected = IllegalArgumentException.class) + public void directSliceInitWithoutDirectAllocation() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.wrap(data); + try(final DirectSlice directSlice = new DirectSlice(buffer)) { + //no-op + } + } + + @Test(expected = IllegalArgumentException.class) + public void directSlicePrefixInitWithoutDirectAllocation() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.wrap(data); + try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) { + //no-op + } + } + + @Test + public void directSliceClear() { + try(final DirectSlice directSlice = new DirectSlice("abc")) { + assertThat(directSlice.toString()).isEqualTo("abc"); + directSlice.clear(); + assertThat(directSlice.toString()).isEmpty(); + directSlice.clear(); // make sure we don't double-free + } + } + + @Test + public void directSliceRemovePrefix() { + try(final DirectSlice directSlice = new DirectSlice("abc")) { + assertThat(directSlice.toString()).isEqualTo("abc"); + directSlice.removePrefix(1); + assertThat(directSlice.toString()).isEqualTo("bc"); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java new file mode 100644 index 000000000..0f3d8e234 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/EnvOptionsTest.java @@ -0,0 +1,145 @@ +// Copyright (c)
2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class EnvOptionsTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = new RocksNativeLibraryResource(); + + public static final Random rand = PlatformRandomHelper.getPlatformSpecificRandomFactory(); + + @Test + public void dbOptionsConstructor() { + final long compactionReadaheadSize = 4 * 1024 * 1024; + try (final DBOptions dbOptions = new DBOptions() + .setCompactionReadaheadSize(compactionReadaheadSize)) { + try (final EnvOptions envOptions = new EnvOptions(dbOptions)) { + assertThat(envOptions.compactionReadaheadSize()) + .isEqualTo(compactionReadaheadSize); + } + } + } + + @Test + public void useMmapReads() { + try (final EnvOptions envOptions = new EnvOptions()) { + final boolean boolValue = rand.nextBoolean(); + envOptions.setUseMmapReads(boolValue); + assertThat(envOptions.useMmapReads()).isEqualTo(boolValue); + } + } + + @Test + public void useMmapWrites() { + try (final EnvOptions envOptions = new EnvOptions()) { + final boolean boolValue = rand.nextBoolean(); + envOptions.setUseMmapWrites(boolValue); + assertThat(envOptions.useMmapWrites()).isEqualTo(boolValue); + } + } + + @Test + public void useDirectReads() { + try (final EnvOptions envOptions = new EnvOptions()) { + final boolean boolValue = rand.nextBoolean(); + envOptions.setUseDirectReads(boolValue); + assertThat(envOptions.useDirectReads()).isEqualTo(boolValue); + } + } + + @Test + public void useDirectWrites() { + try (final EnvOptions envOptions = new EnvOptions()) { + final boolean boolValue = rand.nextBoolean(); + envOptions.setUseDirectWrites(boolValue); + assertThat(envOptions.useDirectWrites()).isEqualTo(boolValue); + } + } + + @Test + public void allowFallocate() { + try (final EnvOptions envOptions = new EnvOptions()) { + final boolean boolValue = rand.nextBoolean(); + envOptions.setAllowFallocate(boolValue); + assertThat(envOptions.allowFallocate()).isEqualTo(boolValue); + } + } + + @Test + public void setFdCloexecs() { + try (final EnvOptions envOptions = new EnvOptions()) { + final boolean boolValue = rand.nextBoolean(); + envOptions.setSetFdCloexec(boolValue); + assertThat(envOptions.setFdCloexec()).isEqualTo(boolValue); + } + } + + @Test + public void bytesPerSync() { + try (final EnvOptions envOptions = new EnvOptions()) { + final long longValue = rand.nextLong(); + envOptions.setBytesPerSync(longValue); + assertThat(envOptions.bytesPerSync()).isEqualTo(longValue); + } + } + + @Test + public void fallocateWithKeepSize() { + try (final EnvOptions envOptions = new EnvOptions()) { + final boolean boolValue = rand.nextBoolean(); + envOptions.setFallocateWithKeepSize(boolValue); + assertThat(envOptions.fallocateWithKeepSize()).isEqualTo(boolValue); + } + } + + @Test + public void compactionReadaheadSize() { + try (final EnvOptions envOptions = new EnvOptions()) { + final int intValue = rand.nextInt(2147483647); + envOptions.setCompactionReadaheadSize(intValue); + assertThat(envOptions.compactionReadaheadSize()).isEqualTo(intValue); + } + } + + @Test + public void randomAccessMaxBufferSize() { + try (final EnvOptions envOptions = new EnvOptions()) { + 
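+      // random_access_max_buffer_size is a plain numeric property; verify the set/get round-trip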
final int intValue = rand.nextInt(2147483647); + envOptions.setRandomAccessMaxBufferSize(intValue); + assertThat(envOptions.randomAccessMaxBufferSize()).isEqualTo(intValue); + } + } + + @Test + public void writableFileMaxBufferSize() { + try (final EnvOptions envOptions = new EnvOptions()) { + final int intValue = rand.nextInt(2147483647); + envOptions.setWritableFileMaxBufferSize(intValue); + assertThat(envOptions.writableFileMaxBufferSize()).isEqualTo(intValue); + } + } + + @Test + public void rateLimiter() { + try (final EnvOptions envOptions = new EnvOptions(); + final RateLimiter rateLimiter1 = new RateLimiter(1000, 100 * 1000, 1)) { + envOptions.setRateLimiter(rateLimiter1); + assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter1); + + try(final RateLimiter rateLimiter2 = new RateLimiter(1000)) { + envOptions.setRateLimiter(rateLimiter2); + assertThat(envOptions.rateLimiter()).isEqualTo(rateLimiter2); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/EventListenerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/EventListenerTest.java new file mode 100644 index 000000000..aec0af617 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/EventListenerTest.java @@ -0,0 +1,725 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.*; +import java.util.concurrent.atomic.AtomicBoolean; +import org.assertj.core.api.AbstractObjectAssert; +import org.assertj.core.api.ObjectAssert; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.AbstractEventListener.EnabledEventCallback; +import org.rocksdb.test.TestableEventListener; + +public class EventListenerTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + public static final Random rand = PlatformRandomHelper.getPlatformSpecificRandomFactory(); + + void flushDb(final AbstractEventListener el, final AtomicBoolean wasCbCalled) + throws RocksDBException { + try (final Options opt = + new Options().setCreateIfMissing(true).setListeners(Collections.singletonList(el)); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + final byte[] value = new byte[24]; + rand.nextBytes(value); + db.put("testKey".getBytes(), value); + db.flush(new FlushOptions()); + assertThat(wasCbCalled.get()).isTrue(); + } + } + + @Test + public void onFlushCompleted() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onFlushCompletedListener = new AbstractEventListener() { + @Override + public void onFlushCompleted(final RocksDB rocksDb, final FlushJobInfo flushJobInfo) { + assertThat(flushJobInfo.getColumnFamilyName()).isNotNull(); + assertThat(flushJobInfo.getFlushReason()).isEqualTo(FlushReason.MANUAL_FLUSH); + wasCbCalled.set(true); + } + }; + flushDb(onFlushCompletedListener, wasCbCalled); + } + + @Test + public void onFlushBegin() throws RocksDBException { + final 
AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onFlushBeginListener = new AbstractEventListener() { + @Override + public void onFlushBegin(final RocksDB rocksDb, final FlushJobInfo flushJobInfo) { + assertThat(flushJobInfo.getColumnFamilyName()).isNotNull(); + assertThat(flushJobInfo.getFlushReason()).isEqualTo(FlushReason.MANUAL_FLUSH); + wasCbCalled.set(true); + } + }; + flushDb(onFlushBeginListener, wasCbCalled); + } + + void deleteTableFile(final AbstractEventListener el, final AtomicBoolean wasCbCalled) + throws RocksDBException { + try (final Options opt = + new Options().setCreateIfMissing(true).setListeners(Collections.singletonList(el)); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + final byte[] value = new byte[24]; + rand.nextBytes(value); + db.put("testKey".getBytes(), value); + final RocksDB.LiveFiles liveFiles = db.getLiveFiles(); + assertThat(liveFiles).isNotNull(); + assertThat(liveFiles.files).isNotNull(); + assertThat(liveFiles.files.isEmpty()).isFalse(); + db.deleteFile(liveFiles.files.get(0)); + assertThat(wasCbCalled.get()).isTrue(); + } + } + + @Test + public void onTableFileDeleted() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onTableFileDeletedListener = new AbstractEventListener() { + @Override + public void onTableFileDeleted(final TableFileDeletionInfo tableFileDeletionInfo) { + assertThat(tableFileDeletionInfo.getDbName()).isNotNull(); + wasCbCalled.set(true); + } + }; + deleteTableFile(onTableFileDeletedListener, wasCbCalled); + } + + void compactRange(final AbstractEventListener el, final AtomicBoolean wasCbCalled) + throws RocksDBException { + try (final Options opt = + new Options().setCreateIfMissing(true).setListeners(Collections.singletonList(el)); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + final byte[] value = new byte[24]; + rand.nextBytes(value); + db.put("testKey".getBytes(), value); + db.compactRange(); + assertThat(wasCbCalled.get()).isTrue(); + } + } + + @Test + public void onCompactionBegin() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onCompactionBeginListener = new AbstractEventListener() { + @Override + public void onCompactionBegin(final RocksDB db, final CompactionJobInfo compactionJobInfo) { + assertThat(compactionJobInfo.compactionReason()) + .isEqualTo(CompactionReason.kManualCompaction); + wasCbCalled.set(true); + } + }; + compactRange(onCompactionBeginListener, wasCbCalled); + } + + @Test + public void onCompactionCompleted() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onCompactionCompletedListener = new AbstractEventListener() { + @Override + public void onCompactionCompleted( + final RocksDB db, final CompactionJobInfo compactionJobInfo) { + assertThat(compactionJobInfo.compactionReason()) + .isEqualTo(CompactionReason.kManualCompaction); + wasCbCalled.set(true); + } + }; + compactRange(onCompactionCompletedListener, wasCbCalled); + } + + @Test + public void onTableFileCreated() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onTableFileCreatedListener = new AbstractEventListener() { + @Override + public void onTableFileCreated(final TableFileCreationInfo tableFileCreationInfo) { + 
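+        // the flush performed by flushDb() writes a new SST file, so the creation reason must be FLUSH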
assertThat(tableFileCreationInfo.getReason()).isEqualTo(TableFileCreationReason.FLUSH); + wasCbCalled.set(true); + } + }; + flushDb(onTableFileCreatedListener, wasCbCalled); + } + + @Test + public void onTableFileCreationStarted() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onTableFileCreationStartedListener = new AbstractEventListener() { + @Override + public void onTableFileCreationStarted( + final TableFileCreationBriefInfo tableFileCreationBriefInfo) { + assertThat(tableFileCreationBriefInfo.getReason()).isEqualTo(TableFileCreationReason.FLUSH); + wasCbCalled.set(true); + } + }; + flushDb(onTableFileCreationStartedListener, wasCbCalled); + } + + void deleteColumnFamilyHandle(final AbstractEventListener el, final AtomicBoolean wasCbCalled) + throws RocksDBException { + try (final Options opt = + new Options().setCreateIfMissing(true).setListeners(Collections.singletonList(el)); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + final byte[] value = new byte[24]; + rand.nextBytes(value); + db.put("testKey".getBytes(), value); + ColumnFamilyHandle columnFamilyHandle = db.getDefaultColumnFamily(); + columnFamilyHandle.close(); + assertThat(wasCbCalled.get()).isTrue(); + } + } + + @Test + public void onColumnFamilyHandleDeletionStarted() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onColumnFamilyHandleDeletionStartedListener = + new AbstractEventListener() { + @Override + public void onColumnFamilyHandleDeletionStarted( + final ColumnFamilyHandle columnFamilyHandle) { + assertThat(columnFamilyHandle).isNotNull(); + wasCbCalled.set(true); + } + }; + deleteColumnFamilyHandle(onColumnFamilyHandleDeletionStartedListener, wasCbCalled); + } + + void ingestExternalFile(final AbstractEventListener el, final AtomicBoolean wasCbCalled) + throws RocksDBException { + try (final Options opt = + new Options().setCreateIfMissing(true).setListeners(Collections.singletonList(el)); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + final String uuid = UUID.randomUUID().toString(); + final SstFileWriter sstFileWriter = new SstFileWriter(new EnvOptions(), opt); + final Path externalFilePath = Paths.get(db.getName(), uuid); + sstFileWriter.open(externalFilePath.toString()); + sstFileWriter.put("testKey".getBytes(), uuid.getBytes()); + sstFileWriter.finish(); + db.ingestExternalFile( + Collections.singletonList(externalFilePath.toString()), new IngestExternalFileOptions()); + assertThat(wasCbCalled.get()).isTrue(); + } + } + + @Test + public void onExternalFileIngested() throws RocksDBException { + final AtomicBoolean wasCbCalled = new AtomicBoolean(); + final AbstractEventListener onExternalFileIngestedListener = new AbstractEventListener() { + @Override + public void onExternalFileIngested( + final RocksDB db, final ExternalFileIngestionInfo externalFileIngestionInfo) { + assertThat(db).isNotNull(); + wasCbCalled.set(true); + } + }; + ingestExternalFile(onExternalFileIngestedListener, wasCbCalled); + } + + @Test + public void testAllCallbacksInvocation() { + final long TEST_LONG_VAL = -1; + // Expected test data objects + final Map<String, String> userCollectedPropertiesTestData = + Collections.singletonMap("key", "value"); + final Map<String, String> readablePropertiesTestData = Collections.singletonMap("key", "value"); + final TableProperties tablePropertiesTestData = new
TableProperties(TEST_LONG_VAL, + TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, + TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, + TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, + TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, "columnFamilyName".getBytes(), + "filterPolicyName", "comparatorName", "mergeOperatorName", "prefixExtractorName", + "propertyCollectorsNames", "compressionName", userCollectedPropertiesTestData, + readablePropertiesTestData); + final FlushJobInfo flushJobInfoTestData = new FlushJobInfo(Integer.MAX_VALUE, + "testColumnFamily", "/file/path", TEST_LONG_VAL, Integer.MAX_VALUE, true, true, + TEST_LONG_VAL, TEST_LONG_VAL, tablePropertiesTestData, (byte) 0x0a); + final Status statusTestData = new Status(Status.Code.Incomplete, Status.SubCode.NoSpace, null); + final TableFileDeletionInfo tableFileDeletionInfoTestData = + new TableFileDeletionInfo("dbName", "/file/path", Integer.MAX_VALUE, statusTestData); + final TableFileCreationInfo tableFileCreationInfoTestData = + new TableFileCreationInfo(TEST_LONG_VAL, tablePropertiesTestData, statusTestData, "dbName", + "columnFamilyName", "/file/path", Integer.MAX_VALUE, (byte) 0x03); + final TableFileCreationBriefInfo tableFileCreationBriefInfoTestData = + new TableFileCreationBriefInfo( + "dbName", "columnFamilyName", "/file/path", Integer.MAX_VALUE, (byte) 0x03); + final MemTableInfo memTableInfoTestData = new MemTableInfo( + "columnFamilyName", TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL, TEST_LONG_VAL); + final FileOperationInfo fileOperationInfoTestData = new FileOperationInfo("/file/path", + TEST_LONG_VAL, TEST_LONG_VAL, 1_600_699_420_000_000_000L, 5_000_000_000L, statusTestData); + final WriteStallInfo writeStallInfoTestData = + new WriteStallInfo("columnFamilyName", (byte) 0x1, (byte) 0x2); + final ExternalFileIngestionInfo externalFileIngestionInfoTestData = + new ExternalFileIngestionInfo("columnFamilyName", "/external/file/path", + "/internal/file/path", TEST_LONG_VAL, tablePropertiesTestData); + + final CapturingTestableEventListener listener = new CapturingTestableEventListener() { + @Override + public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) { + super.onFlushCompleted(db, flushJobInfo); + assertThat(flushJobInfo).isEqualTo(flushJobInfoTestData); + } + + @Override + public void onFlushBegin(final RocksDB db, final FlushJobInfo flushJobInfo) { + super.onFlushBegin(db, flushJobInfo); + assertThat(flushJobInfo).isEqualTo(flushJobInfoTestData); + } + + @Override + public void onTableFileDeleted(final TableFileDeletionInfo tableFileDeletionInfo) { + super.onTableFileDeleted(tableFileDeletionInfo); + assertThat(tableFileDeletionInfo).isEqualTo(tableFileDeletionInfoTestData); + } + + @Override + public void onCompactionBegin(final RocksDB db, final CompactionJobInfo compactionJobInfo) { + super.onCompactionBegin(db, compactionJobInfo); + assertThat(new String(compactionJobInfo.columnFamilyName(), StandardCharsets.UTF_8)) + .isEqualTo("compactionColumnFamily"); + assertThat(compactionJobInfo.status()).isEqualTo(statusTestData); + assertThat(compactionJobInfo.threadId()).isEqualTo(TEST_LONG_VAL); + assertThat(compactionJobInfo.jobId()).isEqualTo(Integer.MAX_VALUE); + assertThat(compactionJobInfo.baseInputLevel()).isEqualTo(Integer.MAX_VALUE); + assertThat(compactionJobInfo.outputLevel()).isEqualTo(Integer.MAX_VALUE); + assertThat(compactionJobInfo.inputFiles()) + 
.isEqualTo(Collections.singletonList("inputFile.sst")); + assertThat(compactionJobInfo.outputFiles()) + .isEqualTo(Collections.singletonList("outputFile.sst")); + assertThat(compactionJobInfo.tableProperties()) + .isEqualTo(Collections.singletonMap("tableProperties", tablePropertiesTestData)); + assertThat(compactionJobInfo.compactionReason()).isEqualTo(CompactionReason.kFlush); + assertThat(compactionJobInfo.compression()).isEqualTo(CompressionType.SNAPPY_COMPRESSION); + } + + @Override + public void onCompactionCompleted( + final RocksDB db, final CompactionJobInfo compactionJobInfo) { + super.onCompactionCompleted(db, compactionJobInfo); + assertThat(new String(compactionJobInfo.columnFamilyName())) + .isEqualTo("compactionColumnFamily"); + assertThat(compactionJobInfo.status()).isEqualTo(statusTestData); + assertThat(compactionJobInfo.threadId()).isEqualTo(TEST_LONG_VAL); + assertThat(compactionJobInfo.jobId()).isEqualTo(Integer.MAX_VALUE); + assertThat(compactionJobInfo.baseInputLevel()).isEqualTo(Integer.MAX_VALUE); + assertThat(compactionJobInfo.outputLevel()).isEqualTo(Integer.MAX_VALUE); + assertThat(compactionJobInfo.inputFiles()) + .isEqualTo(Collections.singletonList("inputFile.sst")); + assertThat(compactionJobInfo.outputFiles()) + .isEqualTo(Collections.singletonList("outputFile.sst")); + assertThat(compactionJobInfo.tableProperties()) + .isEqualTo(Collections.singletonMap("tableProperties", tablePropertiesTestData)); + assertThat(compactionJobInfo.compactionReason()).isEqualTo(CompactionReason.kFlush); + assertThat(compactionJobInfo.compression()).isEqualTo(CompressionType.SNAPPY_COMPRESSION); + } + + @Override + public void onTableFileCreated(final TableFileCreationInfo tableFileCreationInfo) { + super.onTableFileCreated(tableFileCreationInfo); + assertThat(tableFileCreationInfo).isEqualTo(tableFileCreationInfoTestData); + } + + @Override + public void onTableFileCreationStarted( + final TableFileCreationBriefInfo tableFileCreationBriefInfo) { + super.onTableFileCreationStarted(tableFileCreationBriefInfo); + assertThat(tableFileCreationBriefInfo).isEqualTo(tableFileCreationBriefInfoTestData); + } + + @Override + public void onMemTableSealed(final MemTableInfo memTableInfo) { + super.onMemTableSealed(memTableInfo); + assertThat(memTableInfo).isEqualTo(memTableInfoTestData); + } + + @Override + public void onColumnFamilyHandleDeletionStarted(final ColumnFamilyHandle columnFamilyHandle) { + super.onColumnFamilyHandleDeletionStarted(columnFamilyHandle); + } + + @Override + public void onExternalFileIngested( + final RocksDB db, final ExternalFileIngestionInfo externalFileIngestionInfo) { + super.onExternalFileIngested(db, externalFileIngestionInfo); + assertThat(externalFileIngestionInfo).isEqualTo(externalFileIngestionInfoTestData); + } + + @Override + public void onBackgroundError( + final BackgroundErrorReason backgroundErrorReason, final Status backgroundError) { + super.onBackgroundError(backgroundErrorReason, backgroundError); + } + + @Override + public void onStallConditionsChanged(final WriteStallInfo writeStallInfo) { + super.onStallConditionsChanged(writeStallInfo); + assertThat(writeStallInfo).isEqualTo(writeStallInfoTestData); + } + + @Override + public void onFileReadFinish(final FileOperationInfo fileOperationInfo) { + super.onFileReadFinish(fileOperationInfo); + assertThat(fileOperationInfo).isEqualTo(fileOperationInfoTestData); + } + + @Override + public void onFileWriteFinish(final FileOperationInfo fileOperationInfo) { + 
super.onFileWriteFinish(fileOperationInfo);
+        assertThat(fileOperationInfo).isEqualTo(fileOperationInfoTestData);
+      }
+
+      @Override
+      public void onFileFlushFinish(final FileOperationInfo fileOperationInfo) {
+        super.onFileFlushFinish(fileOperationInfo);
+        assertThat(fileOperationInfo).isEqualTo(fileOperationInfoTestData);
+      }
+
+      @Override
+      public void onFileSyncFinish(final FileOperationInfo fileOperationInfo) {
+        super.onFileSyncFinish(fileOperationInfo);
+        assertThat(fileOperationInfo).isEqualTo(fileOperationInfoTestData);
+      }
+
+      @Override
+      public void onFileRangeSyncFinish(final FileOperationInfo fileOperationInfo) {
+        super.onFileRangeSyncFinish(fileOperationInfo);
+        assertThat(fileOperationInfo).isEqualTo(fileOperationInfoTestData);
+      }
+
+      @Override
+      public void onFileTruncateFinish(final FileOperationInfo fileOperationInfo) {
+        super.onFileTruncateFinish(fileOperationInfo);
+        assertThat(fileOperationInfo).isEqualTo(fileOperationInfoTestData);
+      }
+
+      @Override
+      public void onFileCloseFinish(final FileOperationInfo fileOperationInfo) {
+        super.onFileCloseFinish(fileOperationInfo);
+        assertThat(fileOperationInfo).isEqualTo(fileOperationInfoTestData);
+      }
+
+      @Override
+      public boolean shouldBeNotifiedOnFileIO() {
+        super.shouldBeNotifiedOnFileIO();
+        return false;
+      }
+
+      @Override
+      public boolean onErrorRecoveryBegin(
+          final BackgroundErrorReason backgroundErrorReason, final Status backgroundError) {
+        super.onErrorRecoveryBegin(backgroundErrorReason, backgroundError);
+        assertThat(backgroundErrorReason).isEqualTo(BackgroundErrorReason.FLUSH);
+        assertThat(backgroundError).isEqualTo(statusTestData);
+        return true;
+      }
+
+      @Override
+      public void onErrorRecoveryCompleted(final Status oldBackgroundError) {
+        super.onErrorRecoveryCompleted(oldBackgroundError);
+        assertThat(oldBackgroundError).isEqualTo(statusTestData);
+      }
+    };
+
+    // test action
+    listener.invokeAllCallbacks();
+
+    // assert
+    assertAllEventsCalled(listener);
+
+    assertNoCallbackErrors(listener);
+  }
+
+  @Test
+  public void testEnabledCallbacks() {
+    final EnabledEventCallback[] enabledEvents = {
+        EnabledEventCallback.ON_MEMTABLE_SEALED, EnabledEventCallback.ON_ERROR_RECOVERY_COMPLETED};
+
+    final CapturingTestableEventListener listener =
+        new CapturingTestableEventListener(enabledEvents);
+
+    // test action
+    listener.invokeAllCallbacks();
+
+    // assert
+    assertEventsCalled(listener, enabledEvents);
+  }
+
+  private static void assertAllEventsCalled(
+      final CapturingTestableEventListener capturingTestableEventListener) {
+    assertEventsCalled(capturingTestableEventListener, EnumSet.allOf(EnabledEventCallback.class));
+  }
+
+  private static void assertEventsCalled(
+      final CapturingTestableEventListener capturingTestableEventListener,
+      final EnabledEventCallback[] expected) {
+    assertEventsCalled(capturingTestableEventListener, EnumSet.copyOf(Arrays.asList(expected)));
+  }
+
+  private static void assertNoCallbackErrors(
+      final CapturingTestableEventListener capturingTestableEventListener) {
+    for (final AssertionError error : capturingTestableEventListener.capturedAssertionErrors) {
+      throw new Error("An assertion failed in callback", error);
+    }
+  }
+
+  private static void assertEventsCalled(
+      final CapturingTestableEventListener capturingTestableEventListener,
+      final EnumSet<EnabledEventCallback> expected) {
+    final ListenerEvents capturedEvents = capturingTestableEventListener.capturedListenerEvents;
+
+    assertThat(capturedEvents.flushCompleted)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FLUSH_COMPLETED));
+    assertThat(capturedEvents.flushBegin)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FLUSH_BEGIN));
+    assertThat(capturedEvents.tableFileDeleted)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_TABLE_FILE_DELETED));
+    assertThat(capturedEvents.compactionBegin)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_COMPACTION_BEGIN));
+    assertThat(capturedEvents.compactionCompleted)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_COMPACTION_COMPLETED));
+    assertThat(capturedEvents.tableFileCreated)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_TABLE_FILE_CREATED));
+    assertThat(capturedEvents.tableFileCreationStarted)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_TABLE_FILE_CREATION_STARTED));
+    assertThat(capturedEvents.memTableSealed)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_MEMTABLE_SEALED));
+    assertThat(capturedEvents.columnFamilyHandleDeletionStarted)
+        .isEqualTo(
+            expected.contains(EnabledEventCallback.ON_COLUMN_FAMILY_HANDLE_DELETION_STARTED));
+    assertThat(capturedEvents.externalFileIngested)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_EXTERNAL_FILE_INGESTED));
+    assertThat(capturedEvents.backgroundError)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_BACKGROUND_ERROR));
+    assertThat(capturedEvents.stallConditionsChanged)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_STALL_CONDITIONS_CHANGED));
+    assertThat(capturedEvents.fileReadFinish)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FILE_READ_FINISH));
+    assertThat(capturedEvents.fileWriteFinish)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FILE_WRITE_FINISH));
+    assertThat(capturedEvents.fileFlushFinish)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FILE_FLUSH_FINISH));
+    assertThat(capturedEvents.fileSyncFinish)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FILE_SYNC_FINISH));
+    assertThat(capturedEvents.fileRangeSyncFinish)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FILE_RANGE_SYNC_FINISH));
+    assertThat(capturedEvents.fileTruncateFinish)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FILE_TRUNCATE_FINISH));
+    assertThat(capturedEvents.fileCloseFinish)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_FILE_CLOSE_FINISH));
+    assertThat(capturedEvents.shouldBeNotifiedOnFileIO)
+        .isEqualTo(expected.contains(EnabledEventCallback.SHOULD_BE_NOTIFIED_ON_FILE_IO));
+    assertThat(capturedEvents.errorRecoveryBegin)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_ERROR_RECOVERY_BEGIN));
+    assertThat(capturedEvents.errorRecoveryCompleted)
+        .isEqualTo(expected.contains(EnabledEventCallback.ON_ERROR_RECOVERY_COMPLETED));
+  }
+
+  /**
+   * Members are volatile as they may be written
+   * and read by different threads.
+   */
+  private static class ListenerEvents {
+    volatile boolean flushCompleted;
+    volatile boolean flushBegin;
+    volatile boolean tableFileDeleted;
+    volatile boolean compactionBegin;
+    volatile boolean compactionCompleted;
+    volatile boolean tableFileCreated;
+    volatile boolean tableFileCreationStarted;
+    volatile boolean memTableSealed;
+    volatile boolean columnFamilyHandleDeletionStarted;
+    volatile boolean externalFileIngested;
+    volatile boolean backgroundError;
+    volatile boolean stallConditionsChanged;
+    volatile boolean fileReadFinish;
+    volatile boolean fileWriteFinish;
+    volatile boolean fileFlushFinish;
+    volatile boolean fileSyncFinish;
+    volatile boolean fileRangeSyncFinish;
+    volatile boolean fileTruncateFinish;
+    volatile boolean fileCloseFinish;
+    volatile boolean shouldBeNotifiedOnFileIO;
+    volatile boolean errorRecoveryBegin;
+    volatile boolean errorRecoveryCompleted;
+  }
+
+  private static class CapturingObjectAssert<T> extends ObjectAssert<T> {
+    private final List<AssertionError> assertionErrors;
+    public CapturingObjectAssert(T t, List<AssertionError> assertionErrors) {
+      super(t);
+      this.assertionErrors = assertionErrors;
+    }
+
+    @Override
+    public ObjectAssert<T> isEqualTo(Object other) {
+      try {
+        return super.isEqualTo(other);
+      } catch (AssertionError error) {
+        assertionErrors.add(error);
+        throw error;
+      }
+    }
+
+    @Override
+    public ObjectAssert<T> isNotNull() {
+      try {
+        return super.isNotNull();
+      } catch (AssertionError error) {
+        assertionErrors.add(error);
+        throw error;
+      }
+    }
+  }
+
+  private static class CapturingTestableEventListener extends TestableEventListener {
+    final ListenerEvents capturedListenerEvents = new ListenerEvents();
+
+    final List<AssertionError> capturedAssertionErrors = new ArrayList<>();
+
+    protected <T> AbstractObjectAssert<?, T> assertThat(T actual) {
+      return new CapturingObjectAssert<>(actual, capturedAssertionErrors);
+    }
+
+    public CapturingTestableEventListener() {}
+
+    public CapturingTestableEventListener(final EnabledEventCallback...
enabledEventCallbacks) { + super(enabledEventCallbacks); + } + + @Override + public void onFlushCompleted(final RocksDB db, final FlushJobInfo flushJobInfo) { + capturedListenerEvents.flushCompleted = true; + } + + @Override + public void onFlushBegin(final RocksDB db, final FlushJobInfo flushJobInfo) { + capturedListenerEvents.flushBegin = true; + } + + @Override + public void onTableFileDeleted(final TableFileDeletionInfo tableFileDeletionInfo) { + capturedListenerEvents.tableFileDeleted = true; + } + + @Override + public void onCompactionBegin(final RocksDB db, final CompactionJobInfo compactionJobInfo) { + capturedListenerEvents.compactionBegin = true; + } + + @Override + public void onCompactionCompleted(final RocksDB db, final CompactionJobInfo compactionJobInfo) { + capturedListenerEvents.compactionCompleted = true; + } + + @Override + public void onTableFileCreated(final TableFileCreationInfo tableFileCreationInfo) { + capturedListenerEvents.tableFileCreated = true; + } + + @Override + public void onTableFileCreationStarted( + final TableFileCreationBriefInfo tableFileCreationBriefInfo) { + capturedListenerEvents.tableFileCreationStarted = true; + } + + @Override + public void onMemTableSealed(final MemTableInfo memTableInfo) { + capturedListenerEvents.memTableSealed = true; + } + + @Override + public void onColumnFamilyHandleDeletionStarted(final ColumnFamilyHandle columnFamilyHandle) { + capturedListenerEvents.columnFamilyHandleDeletionStarted = true; + } + + @Override + public void onExternalFileIngested( + final RocksDB db, final ExternalFileIngestionInfo externalFileIngestionInfo) { + capturedListenerEvents.externalFileIngested = true; + } + + @Override + public void onBackgroundError( + final BackgroundErrorReason backgroundErrorReason, final Status backgroundError) { + capturedListenerEvents.backgroundError = true; + } + + @Override + public void onStallConditionsChanged(final WriteStallInfo writeStallInfo) { + capturedListenerEvents.stallConditionsChanged = true; + } + + @Override + public void onFileReadFinish(final FileOperationInfo fileOperationInfo) { + capturedListenerEvents.fileReadFinish = true; + } + + @Override + public void onFileWriteFinish(final FileOperationInfo fileOperationInfo) { + capturedListenerEvents.fileWriteFinish = true; + } + + @Override + public void onFileFlushFinish(final FileOperationInfo fileOperationInfo) { + capturedListenerEvents.fileFlushFinish = true; + } + + @Override + public void onFileSyncFinish(final FileOperationInfo fileOperationInfo) { + capturedListenerEvents.fileSyncFinish = true; + } + + @Override + public void onFileRangeSyncFinish(final FileOperationInfo fileOperationInfo) { + capturedListenerEvents.fileRangeSyncFinish = true; + } + + @Override + public void onFileTruncateFinish(final FileOperationInfo fileOperationInfo) { + capturedListenerEvents.fileTruncateFinish = true; + } + + @Override + public void onFileCloseFinish(final FileOperationInfo fileOperationInfo) { + capturedListenerEvents.fileCloseFinish = true; + } + + @Override + public boolean shouldBeNotifiedOnFileIO() { + capturedListenerEvents.shouldBeNotifiedOnFileIO = true; + return false; + } + + @Override + public boolean onErrorRecoveryBegin( + final BackgroundErrorReason backgroundErrorReason, final Status backgroundError) { + capturedListenerEvents.errorRecoveryBegin = true; + return true; + } + + @Override + public void onErrorRecoveryCompleted(final Status oldBackgroundError) { + capturedListenerEvents.errorRecoveryCompleted = true; + } + } +} diff --git 
a/src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java new file mode 100644 index 000000000..dc5c19fbc --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java @@ -0,0 +1,39 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +public class FilterTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void filter() { + // new Bloom filter + final BlockBasedTableConfig blockConfig = new BlockBasedTableConfig(); + try(final Options options = new Options()) { + + try(final Filter bloomFilter = new BloomFilter()) { + blockConfig.setFilterPolicy(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + + try(final Filter bloomFilter = new BloomFilter(10)) { + blockConfig.setFilterPolicy(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + + try(final Filter bloomFilter = new BloomFilter(10, false)) { + blockConfig.setFilterPolicy(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java new file mode 100644 index 000000000..f90ae911d --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/FlushOptionsTest.java @@ -0,0 +1,31 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class FlushOptionsTest { + + @Test + public void waitForFlush() { + try (final FlushOptions flushOptions = new FlushOptions()) { + assertThat(flushOptions.waitForFlush()).isTrue(); + flushOptions.setWaitForFlush(false); + assertThat(flushOptions.waitForFlush()).isFalse(); + } + } + + @Test + public void allowWriteStall() { + try (final FlushOptions flushOptions = new FlushOptions()) { + assertThat(flushOptions.allowWriteStall()).isFalse(); + flushOptions.setAllowWriteStall(true); + assertThat(flushOptions.allowWriteStall()).isTrue(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java new file mode 100644 index 000000000..1a354f4ce --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class FlushTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void flush() throws RocksDBException { + try(final Options options = new Options() + .setCreateIfMissing(true) + .setMaxWriteBufferNumber(10) + .setMinWriteBufferNumberToMerge(10); + final WriteOptions wOpt = new WriteOptions() + .setDisableWAL(true); + final FlushOptions flushOptions = new FlushOptions() + .setWaitForFlush(true)) { + assertThat(flushOptions.waitForFlush()).isTrue(); + + try(final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put(wOpt, "key1".getBytes(), "value1".getBytes()); + db.put(wOpt, "key2".getBytes(), "value2".getBytes()); + db.put(wOpt, "key3".getBytes(), "value3".getBytes()); + db.put(wOpt, "key4".getBytes(), "value4".getBytes()); + assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")) + .isEqualTo("4"); + db.flush(flushOptions); + assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")) + .isEqualTo("0"); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java new file mode 100644 index 000000000..12ee537d9 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java @@ -0,0 +1,109 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.Environment; + +import java.io.IOException; + +import static java.nio.file.Files.readAllBytes; +import static java.nio.file.Paths.get; +import static org.assertj.core.api.Assertions.assertThat; + +public class InfoLogLevelTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void testInfoLogLevel() throws RocksDBException, + IOException { + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + db.flush(new FlushOptions().setWaitForFlush(true)); + assertThat(getLogContentsWithoutHeader()).isNotEmpty(); + } + } + + @Test + public void testFatalLogLevel() throws RocksDBException, + IOException { + try (final Options options = new Options(). + setCreateIfMissing(true). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(options.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + db.put("key".getBytes(), "value".getBytes()); + // As InfoLogLevel is set to FATAL_LEVEL, here we expect the log + // content to be empty. + assertThat(getLogContentsWithoutHeader()).isEmpty(); + } + } + + @Test + public void testFatalLogLevelWithDBOptions() + throws RocksDBException, IOException { + try (final DBOptions dbOptions = new DBOptions(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); + final Options options = new Options(dbOptions, + new ColumnFamilyOptions()). 
+ setCreateIfMissing(true); + final RocksDB db = + RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + assertThat(dbOptions.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + assertThat(options.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + db.put("key".getBytes(), "value".getBytes()); + assertThat(getLogContentsWithoutHeader()).isEmpty(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void failIfIllegalByteValueProvided() { + InfoLogLevel.getInfoLogLevel((byte) -1); + } + + @Test + public void valueOf() { + assertThat(InfoLogLevel.valueOf("DEBUG_LEVEL")). + isEqualTo(InfoLogLevel.DEBUG_LEVEL); + } + + /** + * Read LOG file contents into String. + * + * @return LOG file contents as String. + * @throws IOException if file is not found. + */ + private String getLogContentsWithoutHeader() throws IOException { + final String separator = Environment.isWindows() ? + "\n" : System.getProperty("line.separator"); + final String[] lines = new String(readAllBytes(get( + dbFolder.getRoot().getAbsolutePath() + "/LOG"))).split(separator); + + int first_non_header = lines.length; + // Identify the last line of the header + for (int i = lines.length - 1; i >= 0; --i) { + if (lines[i].indexOf("DB pointer") >= 0) { + first_non_header = i + 1; + break; + } + } + StringBuilder builder = new StringBuilder(); + for (int i = first_non_header; i < lines.length; ++i) { + builder.append(lines[i]).append(separator); + } + return builder.toString(); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java new file mode 100644 index 000000000..ab7e21568 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/IngestExternalFileOptionsTest.java @@ -0,0 +1,107 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class IngestExternalFileOptionsTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE + = new RocksNativeLibraryResource(); + + public static final Random rand = + PlatformRandomHelper.getPlatformSpecificRandomFactory(); + + @Test + public void createExternalSstFileInfoWithoutParameters() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + assertThat(options).isNotNull(); + } + } + + @Test + public void createExternalSstFileInfoWithParameters() { + final boolean moveFiles = rand.nextBoolean(); + final boolean snapshotConsistency = rand.nextBoolean(); + final boolean allowGlobalSeqNo = rand.nextBoolean(); + final boolean allowBlockingFlush = rand.nextBoolean(); + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions(moveFiles, snapshotConsistency, + allowGlobalSeqNo, allowBlockingFlush)) { + assertThat(options).isNotNull(); + assertThat(options.moveFiles()).isEqualTo(moveFiles); + assertThat(options.snapshotConsistency()).isEqualTo(snapshotConsistency); + assertThat(options.allowGlobalSeqNo()).isEqualTo(allowGlobalSeqNo); + assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush); + } + } + + @Test + public void moveFiles() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + final boolean moveFiles = rand.nextBoolean(); + options.setMoveFiles(moveFiles); + assertThat(options.moveFiles()).isEqualTo(moveFiles); + } + } + + @Test + public void snapshotConsistency() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + final boolean snapshotConsistency = rand.nextBoolean(); + options.setSnapshotConsistency(snapshotConsistency); + assertThat(options.snapshotConsistency()).isEqualTo(snapshotConsistency); + } + } + + @Test + public void allowGlobalSeqNo() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + final boolean allowGlobalSeqNo = rand.nextBoolean(); + options.setAllowGlobalSeqNo(allowGlobalSeqNo); + assertThat(options.allowGlobalSeqNo()).isEqualTo(allowGlobalSeqNo); + } + } + + @Test + public void allowBlockingFlush() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + final boolean allowBlockingFlush = rand.nextBoolean(); + options.setAllowBlockingFlush(allowBlockingFlush); + assertThat(options.allowBlockingFlush()).isEqualTo(allowBlockingFlush); + } + } + + @Test + public void ingestBehind() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + assertThat(options.ingestBehind()).isFalse(); + options.setIngestBehind(true); + assertThat(options.ingestBehind()).isTrue(); + } + } + + @Test + public void writeGlobalSeqno() { + try (final IngestExternalFileOptions options = + new IngestExternalFileOptions()) { + assertThat(options.writeGlobalSeqno()).isTrue(); + options.setWriteGlobalSeqno(false); + assertThat(options.writeGlobalSeqno()).isFalse(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java new file mode 100644 index 000000000..3f3bec6ba --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java @@ -0,0 +1,528 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.nio.BufferUnderflowException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.junit.*;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.TemporaryFolder;
+
+public class KeyMayExistTest {
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Rule public ExpectedException exceptionRule = ExpectedException.none();
+
+  List<ColumnFamilyDescriptor> cfDescriptors;
+  List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+  RocksDB db;
+
+  // Slice key
+  int offset;
+  int len;
+
+  byte[] sliceKey;
+  byte[] sliceValue;
+
+  @Before
+  public void before() throws RocksDBException {
+    cfDescriptors = Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final DBOptions options =
+        new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
+
+    db = RocksDB.open(
+        options, dbFolder.getRoot().getAbsolutePath(), cfDescriptors, columnFamilyHandleList);
+
+    // Build the slice key
+    final StringBuilder builder = new StringBuilder("prefix");
+    offset = builder.toString().length();
+    builder.append("slice key 0");
+    len = builder.toString().length() - offset;
+    builder.append("suffix");
+    sliceKey = builder.toString().getBytes(UTF_8);
+    sliceValue = "slice value 0".getBytes(UTF_8);
+  }
+
+  @After
+  public void after() {
+    for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
+      columnFamilyHandle.close();
+    }
+    db.close();
+  }
+
+  @Test
+  public void keyMayExist() throws RocksDBException {
+    assertThat(columnFamilyHandleList.size()).isEqualTo(2);
+
+    // Standard key
+    db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+
+    // Test without column family
+    final Holder<byte[]> holder = new Holder<>();
+    boolean exists = db.keyMayExist("key".getBytes(UTF_8), holder);
+    assertThat(exists).isTrue();
+    assertThat(holder.getValue()).isNotNull();
+    assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+    exists = db.keyMayExist("key".getBytes(UTF_8), null);
+    assertThat(exists).isTrue();
+  }
+
+  @Test
+  public void keyMayExistReadOptions() throws RocksDBException {
+    // Test without column family but with readOptions
+    try (final ReadOptions readOptions = new ReadOptions()) {
+      // Standard key
+      db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+
+      // Slice key
+      db.put(sliceKey, offset, len, sliceValue, 0, sliceValue.length);
+
+      final Holder<byte[]> holder = new Holder<>();
+      boolean exists = db.keyMayExist(readOptions, "key".getBytes(UTF_8), holder);
+      assertThat(exists).isTrue();
+      assertThat(holder.getValue()).isNotNull();
+      assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+      exists = db.keyMayExist(readOptions, "key".getBytes(UTF_8), null);
+      assertThat(exists).isTrue();
+
+      exists = db.keyMayExist(readOptions, sliceKey, offset, len, holder);
+      assertThat(exists).isTrue();
+      assertThat(holder.getValue()).isNotNull();
+      assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+      exists = db.keyMayExist(readOptions, sliceKey, offset, len, null);
+      assertThat(exists).isTrue();
+    }
+  }
+
+  @Test
+  public void keyMayExistColumnFamily() throws RocksDBException {
+    // Standard key
+    db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+
+    // Slice key
+    db.put(sliceKey, offset, len, sliceValue, 0, sliceValue.length);
+
+    // Test slice key with column family
+    final Holder<byte[]> holder = new Holder<>();
+    boolean exists = db.keyMayExist(columnFamilyHandleList.get(0), sliceKey, offset, len, holder);
+    assertThat(exists).isTrue();
+    assertThat(holder.getValue()).isNotNull();
+    assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+    exists = db.keyMayExist(columnFamilyHandleList.get(0), sliceKey, offset, len, null);
+    assertThat(exists).isTrue();
+  }
+
+  @Test
+  public void keyMayExistColumnFamilyReadOptions() throws RocksDBException {
+    // Standard key
+    db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+
+    // Slice key
+    db.put(sliceKey, offset, len, sliceValue, 0, sliceValue.length);
+
+    // Test slice key with column family and read options
+    final Holder<byte[]> holder = new Holder<>();
+    try (final ReadOptions readOptions = new ReadOptions()) {
+      boolean exists =
+          db.keyMayExist(columnFamilyHandleList.get(0), readOptions, "key".getBytes(UTF_8), holder);
+      assertThat(exists).isTrue();
+      assertThat(holder.getValue()).isNotNull();
+      assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+      exists =
+          db.keyMayExist(columnFamilyHandleList.get(0), readOptions, "key".getBytes(UTF_8), null);
+      assertThat(exists).isTrue();
+
+      // Test slice key with column family and read options
+      exists =
+          db.keyMayExist(columnFamilyHandleList.get(0), readOptions, sliceKey, offset, len, holder);
+      assertThat(exists).isTrue();
+      assertThat(holder.getValue()).isNotNull();
+      assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+      exists =
+          db.keyMayExist(columnFamilyHandleList.get(0), readOptions, sliceKey, offset, len, null);
+      assertThat(exists).isTrue();
+    }
+  }
+
+  @Test
+  public void keyMayExistSliceKey() throws RocksDBException {
+    assertThat(columnFamilyHandleList.size()).isEqualTo(2);
+
+    // Standard key
+    db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+
+    // Slice key
+    db.put(sliceKey, offset, len, sliceValue, 0, sliceValue.length);
+
+    final Holder<byte[]> holder = new Holder<>();
+    boolean exists = db.keyMayExist(sliceKey, offset, len, holder);
+    assertThat(exists).isTrue();
+    assertThat(holder.getValue()).isNotNull();
+    assertThat(holder.getValue()).isEqualTo(sliceValue);
+
+    exists = db.keyMayExist(sliceKey, offset, len, null);
+    assertThat(exists).isTrue();
+
+    exists = db.keyMayExist("slice key".getBytes(UTF_8), null);
+    assertThat(exists).isFalse();
+
+    exists = db.keyMayExist("slice key 0".getBytes(UTF_8), null);
+    assertThat(exists).isTrue();
+
+    // Test with column family
+    exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(UTF_8), holder);
+    assertThat(exists).isTrue();
+    assertThat(holder.getValue()).isNotNull();
+    assertThat(new String(holder.getValue(), UTF_8)).isEqualTo("value");
+
+    exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(UTF_8), null);
+    assertThat(exists).isTrue();
+
+    // KeyMayExist in CF1 must return null value
+    exists = db.keyMayExist(columnFamilyHandleList.get(1), "key".getBytes(UTF_8), holder);
+    assertThat(exists).isFalse();
+    assertThat(holder.getValue()).isNull();
+    exists = db.keyMayExist(columnFamilyHandleList.get(1), "key".getBytes(UTF_8), null);
+    assertThat(exists).isFalse();
+
+    // slice key
+    exists = db.keyMayExist(columnFamilyHandleList.get(1), sliceKey, 1, 3, holder);
+    assertThat(exists).isFalse();
+    assertThat(holder.getValue()).isNull();
+    exists = db.keyMayExist(columnFamilyHandleList.get(1), sliceKey, 1, 3, null);
+    assertThat(exists).isFalse();
+  }
+
+  @Test
+  public void keyMayExistCF1() throws RocksDBException {
+    // Standard key
+    db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+
+    // Slice key
+    db.put(sliceKey, offset, len, sliceValue, 0, sliceValue.length);
+
+    // KeyMayExist in CF1 must return null value
+    final Holder<byte[]> holder = new Holder<>();
+    boolean exists = db.keyMayExist(columnFamilyHandleList.get(1), "key".getBytes(UTF_8), holder);
+    assertThat(exists).isFalse();
+    assertThat(holder.getValue()).isNull();
+    exists = db.keyMayExist(columnFamilyHandleList.get(1), "key".getBytes(UTF_8), null);
+    assertThat(exists).isFalse();
+  }
+
+  @Test
+  public void keyMayExistCF1Slice() throws RocksDBException {
+    // Standard key
+    db.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
+
+    // Slice key
+    db.put(sliceKey, offset, len, sliceValue, 0, sliceValue.length);
+
+    // slice key
+    final Holder<byte[]> holder = new Holder<>();
+    boolean exists = db.keyMayExist(columnFamilyHandleList.get(1), sliceKey, 1, 3, holder);
+    assertThat(exists).isFalse();
+    assertThat(holder.getValue()).isNull();
+    exists = db.keyMayExist(columnFamilyHandleList.get(1), sliceKey, 1, 3, null);
+    assertThat(exists).isFalse();
+  }
+
+  @Test
+  public void keyMayExistBB() throws RocksDBException {
+    // Standard key
+    db.put("keyBB".getBytes(UTF_8), "valueBB".getBytes(UTF_8));
+
+    final byte[] key = "keyBB".getBytes(UTF_8);
+    final byte[] value = "valueBB".getBytes(UTF_8);
+
+    final ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length);
+    keyBuffer.put(key, 0, key.length);
+    keyBuffer.flip();
+
+    assertThat(db.keyMayExist(keyBuffer)).isEqualTo(true);
+
+    final ByteBuffer valueBuffer = ByteBuffer.allocateDirect(value.length + 24);
+    valueBuffer.position(12);
+    KeyMayExist keyMayExist = db.keyMayExist(keyBuffer, valueBuffer);
+    assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue);
+    assertThat(keyMayExist.valueLength).isEqualTo(value.length);
+    assertThat(valueBuffer.position()).isEqualTo(12);
+    assertThat(valueBuffer.limit()).isEqualTo(12 + value.length);
+    byte[] valueGet = new byte[value.length];
+    valueBuffer.get(valueGet);
+    assertThat(valueGet).isEqualTo(value);
+
+    valueBuffer.limit(value.length + 24);
+    valueBuffer.position(25);
+    keyMayExist = db.keyMayExist(keyBuffer, valueBuffer);
+    assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue);
+    assertThat(keyMayExist.valueLength).isEqualTo(value.length);
+    assertThat(valueBuffer.position()).isEqualTo(25);
+    assertThat(valueBuffer.limit()).isEqualTo(24 + value.length);
+    valueGet = new byte[value.length - 1];
+    valueBuffer.get(valueGet);
+    assertThat(valueGet).isEqualTo(Arrays.copyOfRange(value, 0, value.length - 1));
+
+    exceptionRule.expect(BufferUnderflowException.class);
+    valueGet = new byte[value.length];
+    valueBuffer.get(valueGet);
+  }
+
+  @Test
+  public void keyMayExistBBReadOptions() throws RocksDBException {
+    // Standard key
+    db.put("keyBB".getBytes(UTF_8), "valueBB".getBytes(UTF_8));
+
+    final byte[] key = "keyBB".getBytes(UTF_8);
+    final byte[] value = "valueBB".getBytes(UTF_8);
+
+    final ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length);
+    keyBuffer.put(key, 0, key.length);
+    keyBuffer.flip();
+
+    try (final ReadOptions readOptions = new ReadOptions()) {
+
assertThat(db.keyMayExist(readOptions, keyBuffer)).isEqualTo(true); + + final ByteBuffer valueBuffer = ByteBuffer.allocateDirect(value.length + 24); + valueBuffer.position(12); + KeyMayExist keyMayExist = db.keyMayExist(readOptions, keyBuffer, valueBuffer); + assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue); + assertThat(keyMayExist.valueLength).isEqualTo(value.length); + assertThat(valueBuffer.position()).isEqualTo(12); + assertThat(valueBuffer.limit()).isEqualTo(12 + value.length); + byte[] valueGet = new byte[value.length]; + valueBuffer.get(valueGet); + assertThat(valueGet).isEqualTo(value); + + valueBuffer.limit(value.length + 24); + valueBuffer.position(25); + keyMayExist = db.keyMayExist(readOptions, keyBuffer, valueBuffer); + assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue); + assertThat(keyMayExist.valueLength).isEqualTo(value.length); + assertThat(valueBuffer.position()).isEqualTo(25); + assertThat(valueBuffer.limit()).isEqualTo(24 + value.length); + valueGet = new byte[value.length - 1]; + valueBuffer.get(valueGet); + assertThat(valueGet).isEqualTo(Arrays.copyOfRange(value, 0, value.length - 1)); + + exceptionRule.expect(BufferUnderflowException.class); + valueGet = new byte[value.length]; + valueBuffer.get(valueGet); + } + } + + @Test + public void keyMayExistBBNullValue() throws RocksDBException { + // Standard key + db.put("keyBB".getBytes(UTF_8), "valueBB".getBytes(UTF_8)); + + final byte[] key = "keyBB".getBytes(UTF_8); + + final ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length); + keyBuffer.put(key, 0, key.length); + keyBuffer.flip(); + + exceptionRule.expect(AssertionError.class); + exceptionRule.expectMessage( + "value ByteBuffer parameter cannot be null. If you do not need the value, use a different version of the method"); + final KeyMayExist keyMayExist = db.keyMayExist(keyBuffer, null); + } + + @Test + public void keyMayExistBBCF() throws RocksDBException { + // Standard key + db.put(columnFamilyHandleList.get(0), "keyBBCF0".getBytes(UTF_8), "valueBBCF0".getBytes(UTF_8)); + db.put(columnFamilyHandleList.get(1), "keyBBCF1".getBytes(UTF_8), "valueBBCF1".getBytes(UTF_8)); + + // 0 is the default CF + byte[] key = "keyBBCF0".getBytes(UTF_8); + ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length); + keyBuffer.put(key, 0, key.length); + keyBuffer.flip(); + + assertThat(db.keyMayExist(keyBuffer)).isEqualTo(true); + assertThat(db.keyMayExist(columnFamilyHandleList.get(1), keyBuffer)).isEqualTo(false); + assertThat(db.keyMayExist(columnFamilyHandleList.get(0), keyBuffer)).isEqualTo(true); + + // 1 is just a CF + key = "keyBBCF1".getBytes(UTF_8); + keyBuffer = ByteBuffer.allocateDirect(key.length); + keyBuffer.put(key, 0, key.length); + keyBuffer.flip(); + + assertThat(db.keyMayExist(keyBuffer)).isEqualTo(false); + assertThat(db.keyMayExist(columnFamilyHandleList.get(1), keyBuffer)).isEqualTo(true); + assertThat(db.keyMayExist(columnFamilyHandleList.get(0), keyBuffer)).isEqualTo(false); + + exceptionRule.expect(AssertionError.class); + exceptionRule.expectMessage( + "value ByteBuffer parameter cannot be null. 
If you do not need the value, use a different version of the method"); + final KeyMayExist keyMayExist = db.keyMayExist(columnFamilyHandleList.get(0), keyBuffer, null); + } + + @Test + public void keyMayExistBBCFReadOptions() throws RocksDBException { + // Standard key + db.put(columnFamilyHandleList.get(0), "keyBBCF0".getBytes(UTF_8), "valueBBCF0".getBytes(UTF_8)); + db.put(columnFamilyHandleList.get(1), "keyBBCF1".getBytes(UTF_8), "valueBBCF1".getBytes(UTF_8)); + + // 0 is the default CF + byte[] key = "keyBBCF0".getBytes(UTF_8); + ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length); + keyBuffer.put(key, 0, key.length); + keyBuffer.flip(); + + try (final ReadOptions readOptions = new ReadOptions()) { + assertThat(db.keyMayExist(keyBuffer)).isEqualTo(true); + assertThat(db.keyMayExist(columnFamilyHandleList.get(1), readOptions, keyBuffer)) + .isEqualTo(false); + assertThat(db.keyMayExist(columnFamilyHandleList.get(0), readOptions, keyBuffer)) + .isEqualTo(true); + + // 1 is just a CF + key = "keyBBCF1".getBytes(UTF_8); + keyBuffer = ByteBuffer.allocateDirect(key.length); + keyBuffer.put(key, 0, key.length); + keyBuffer.flip(); + + assertThat(db.keyMayExist(readOptions, keyBuffer)).isEqualTo(false); + assertThat(db.keyMayExist(columnFamilyHandleList.get(1), readOptions, keyBuffer)) + .isEqualTo(true); + assertThat(db.keyMayExist(columnFamilyHandleList.get(0), readOptions, keyBuffer)) + .isEqualTo(false); + + exceptionRule.expect(AssertionError.class); + exceptionRule.expectMessage( + "value ByteBuffer parameter cannot be null. If you do not need the value, use a different version of the method"); + final KeyMayExist keyMayExist = + db.keyMayExist(columnFamilyHandleList.get(0), readOptions, keyBuffer, null); + } + } + + @Test + public void keyMayExistBBCFOffset() throws RocksDBException { + db.put(columnFamilyHandleList.get(1), "keyBBCF1".getBytes(UTF_8), "valueBBCF1".getBytes(UTF_8)); + + final byte[] key = "keyBBCF1".getBytes(UTF_8); + final byte[] value = "valueBBCF1".getBytes(UTF_8); + + final ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length); + keyBuffer.put(key, 0, key.length); + keyBuffer.flip(); + + assertThat(db.keyMayExist(columnFamilyHandleList.get(1), keyBuffer)).isEqualTo(true); + + final ByteBuffer valueBuffer = ByteBuffer.allocateDirect(value.length + 24); + valueBuffer.position(12); + KeyMayExist keyMayExist = db.keyMayExist(columnFamilyHandleList.get(1), keyBuffer, valueBuffer); + assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue); + assertThat(keyMayExist.valueLength).isEqualTo(value.length); + assertThat(valueBuffer.position()).isEqualTo(12); + assertThat(valueBuffer.limit()).isEqualTo(12 + value.length); + byte[] valueGet = new byte[value.length]; + valueBuffer.get(valueGet); + assertThat(valueGet).isEqualTo(value); + + valueBuffer.limit(value.length + 24); + valueBuffer.position(25); + keyMayExist = db.keyMayExist(columnFamilyHandleList.get(1), keyBuffer, valueBuffer); + assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue); + assertThat(keyMayExist.valueLength).isEqualTo(value.length); + assertThat(valueBuffer.position()).isEqualTo(25); + assertThat(valueBuffer.limit()).isEqualTo(24 + value.length); + valueGet = new byte[value.length - 1]; + valueBuffer.get(valueGet); + assertThat(valueGet).isEqualTo(Arrays.copyOfRange(value, 0, value.length - 1)); + + exceptionRule.expect(BufferUnderflowException.class); + valueGet = new byte[value.length]; + valueBuffer.get(valueGet); + } + + 
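// Illustrative sketch, an editorial addition rather than part of the
+  // upstream change-set: it spells out the boolean and enum contracts that
+  // the ByteBuffer overloads exercised above rely on, this time for a key
+  // that was never written. The test name keyMayExistBBAbsentKey is ours;
+  // everything else reuses the db, UTF_8 and assertion helpers already in
+  // scope in this class.
+  @Test
+  public void keyMayExistBBAbsentKey() throws RocksDBException {
+    final byte[] key = "absentKeyBB".getBytes(UTF_8);
+    final ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length);
+    keyBuffer.put(key, 0, key.length);
+    keyBuffer.flip();
+
+    // Boolean overload: no value buffer supplied, plain yes/no answer.
+    assertThat(db.keyMayExist(keyBuffer)).isEqualTo(false);
+
+    // Value-buffer overload: the enum distinguishes "definitely not there"
+    // from "there, but the value was not fetched".
+    final ByteBuffer valueBuffer = ByteBuffer.allocateDirect(32);
+    final KeyMayExist keyMayExist = db.keyMayExist(keyBuffer, valueBuffer);
+    assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kNotExist);
+  }
+
+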
@Test
+  public void keyMayExistBBCFOffsetReadOptions() throws RocksDBException {
+    db.put(columnFamilyHandleList.get(1), "keyBBCF1".getBytes(UTF_8), "valueBBCF1".getBytes(UTF_8));
+
+    final byte[] key = "keyBBCF1".getBytes(UTF_8);
+    final byte[] value = "valueBBCF1".getBytes(UTF_8);
+
+    final ByteBuffer keyBuffer = ByteBuffer.allocateDirect(key.length);
+    keyBuffer.put(key, 0, key.length);
+    keyBuffer.flip();
+
+    try (final ReadOptions readOptions = new ReadOptions()) {
+      assertThat(db.keyMayExist(columnFamilyHandleList.get(1), readOptions, keyBuffer))
+          .isEqualTo(true);
+
+      final ByteBuffer valueBuffer = ByteBuffer.allocateDirect(value.length + 24);
+      valueBuffer.position(12);
+      KeyMayExist keyMayExist =
+          db.keyMayExist(columnFamilyHandleList.get(1), readOptions, keyBuffer, valueBuffer);
+      assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue);
+      assertThat(keyMayExist.valueLength).isEqualTo(value.length);
+      assertThat(valueBuffer.position()).isEqualTo(12);
+      assertThat(valueBuffer.limit()).isEqualTo(12 + value.length);
+      byte[] valueGet = new byte[value.length];
+      valueBuffer.get(valueGet);
+      assertThat(valueGet).isEqualTo(value);
+
+      valueBuffer.limit(value.length + 24);
+      valueBuffer.position(25);
+      keyMayExist =
+          db.keyMayExist(columnFamilyHandleList.get(1), readOptions, keyBuffer, valueBuffer);
+      assertThat(keyMayExist.exists).isEqualTo(KeyMayExist.KeyMayExistEnum.kExistsWithValue);
+      assertThat(keyMayExist.valueLength).isEqualTo(value.length);
+      assertThat(valueBuffer.position()).isEqualTo(25);
+      assertThat(valueBuffer.limit()).isEqualTo(24 + value.length);
+      valueGet = new byte[value.length - 1];
+      valueBuffer.get(valueGet);
+      assertThat(valueGet).isEqualTo(Arrays.copyOfRange(value, 0, value.length - 1));
+
+      exceptionRule.expect(BufferUnderflowException.class);
+      valueGet = new byte[value.length];
+      valueBuffer.get(valueGet);
+    }
+  }
+
+  @Test
+  public void keyMayExistNonUnicodeString() throws RocksDBException {
+    final byte[] key = "key".getBytes(UTF_8);
+    final byte[] value = {(byte) 0x80}; // not a valid UTF-8 sequence
+    db.put(key, value);
+
+    final byte[] buf = new byte[10];
+    final int read = db.get(key, buf);
+    assertThat(read).isEqualTo(1);
+    assertThat(buf).startsWith(value);
+
+    final Holder<byte[]> holder = new Holder<>();
+    boolean exists = db.keyMayExist("key".getBytes(UTF_8), holder);
+    assertThat(exists).isTrue();
+    assertThat(holder.getValue()).isNotNull();
+    assertThat(holder.getValue()).isEqualTo(value);
+
+    exists = db.keyMayExist("key".getBytes(UTF_8), null);
+    assertThat(exists).isTrue();
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java
new file mode 100644
index 000000000..4d194e712
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/LRUCacheTest.java
@@ -0,0 +1,32 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.ClassRule; +import org.junit.Test; + +public class LRUCacheTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void newLRUCache() { + final long capacity = 80000000; + final int numShardBits = 16; + final boolean strictCapacityLimit = true; + final double highPriPoolRatio = 0.5; + final double lowPriPoolRatio = 0.5; + try (final Cache lruCache = new LRUCache( + capacity, numShardBits, strictCapacityLimit, highPriPoolRatio, lowPriPoolRatio)) { + //no op + assertThat(lruCache.getUsage()).isGreaterThanOrEqualTo(0); + assertThat(lruCache.getPinnedUsage()).isGreaterThanOrEqualTo(0); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java new file mode 100644 index 000000000..5bc299f11 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java @@ -0,0 +1,239 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; + +public class LoggerTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void customLogger() throws RocksDBException { + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL). + setCreateIfMissing(true); + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { + // Set custom logger to options + options.setLogger(logger); + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + // there should be more than zero received log messages in + // debug level. + assertThat(logMessageCounter.get()).isGreaterThan(0); + } + } + } + + @Test + public void warnLogger() throws RocksDBException { + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.WARN_LEVEL). + setCreateIfMissing(true); + + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { + + // Set custom logger to options + options.setLogger(logger); + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + // there should be zero messages + // using warn level as log level. 
+        assertThat(logMessageCounter.get()).isEqualTo(0);
+      }
+    }
+  }
+
+
+  @Test
+  public void fatalLogger() throws RocksDBException {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final Options options = new Options().
+        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+        setCreateIfMissing(true);
+
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+
+      // Set custom logger to options
+      options.setLogger(logger);
+
+      try (final RocksDB db = RocksDB.open(options,
+          dbFolder.getRoot().getAbsolutePath())) {
+        // there should be zero messages
+        // using fatal level as log level.
+        assertThat(logMessageCounter.get()).isEqualTo(0);
+      }
+    }
+  }
+
+  @Test
+  public void dbOptionsLogger() throws RocksDBException {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final DBOptions options = new DBOptions().
+        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+        setCreateIfMissing(true);
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+      // Set custom logger to options
+      options.setLogger(logger);
+
+      final List<ColumnFamilyDescriptor> cfDescriptors =
+          Arrays.asList(
+              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+      try (final RocksDB db = RocksDB.open(options,
+          dbFolder.getRoot().getAbsolutePath(),
+          cfDescriptors, cfHandles)) {
+        try {
+          // there should be zero messages
+          // using fatal level as log level.
+          assertThat(logMessageCounter.get()).isEqualTo(0);
+        } finally {
+          for (final ColumnFamilyHandle columnFamilyHandle : cfHandles) {
+            columnFamilyHandle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void setWarnLogLevel() {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final Options options = new Options().
+        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+        setCreateIfMissing(true);
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+      assertThat(logger.infoLogLevel()).
+          isEqualTo(InfoLogLevel.FATAL_LEVEL);
+      logger.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
+      assertThat(logger.infoLogLevel()).
+          isEqualTo(InfoLogLevel.WARN_LEVEL);
+    }
+  }
+
+  @Test
+  public void setInfoLogLevel() {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final Options options = new Options().
+        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+        setCreateIfMissing(true);
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+      assertThat(logger.infoLogLevel()).
+ isEqualTo(InfoLogLevel.FATAL_LEVEL); + logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL); + assertThat(logger.infoLogLevel()). + isEqualTo(InfoLogLevel.DEBUG_LEVEL); + } + } + + @Test + public void changeLogLevelAtRuntime() throws RocksDBException { + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). + setCreateIfMissing(true); + + // Create new logger with max log level passed by options + final Logger logger = new Logger(options) { + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { + // Set custom logger to options + options.setLogger(logger); + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + // there should be zero messages + // using fatal level as log level. + assertThat(logMessageCounter.get()).isEqualTo(0); + + // change log level to debug level + logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL); + + db.put("key".getBytes(), "value".getBytes()); + db.flush(new FlushOptions().setWaitForFlush(true)); + + // messages shall be received due to previous actions. + assertThat(logMessageCounter.get()).isNotEqualTo(0); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java new file mode 100644 index 000000000..73ac589a9 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java @@ -0,0 +1,111 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MemTableTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void hashSkipListMemTable() throws RocksDBException { + try(final Options options = new Options()) { + // Test HashSkipListMemTableConfig + HashSkipListMemTableConfig memTableConfig = + new HashSkipListMemTableConfig(); + assertThat(memTableConfig.bucketCount()). + isEqualTo(1000000); + memTableConfig.setBucketCount(2000000); + assertThat(memTableConfig.bucketCount()). + isEqualTo(2000000); + assertThat(memTableConfig.height()). + isEqualTo(4); + memTableConfig.setHeight(5); + assertThat(memTableConfig.height()). + isEqualTo(5); + assertThat(memTableConfig.branchingFactor()). + isEqualTo(4); + memTableConfig.setBranchingFactor(6); + assertThat(memTableConfig.branchingFactor()). + isEqualTo(6); + options.setMemTableConfig(memTableConfig); + } + } + + @Test + public void skipListMemTable() throws RocksDBException { + try(final Options options = new Options()) { + SkipListMemTableConfig skipMemTableConfig = + new SkipListMemTableConfig(); + assertThat(skipMemTableConfig.lookahead()). + isEqualTo(0); + skipMemTableConfig.setLookahead(20); + assertThat(skipMemTableConfig.lookahead()). 
+ isEqualTo(20); + options.setMemTableConfig(skipMemTableConfig); + } + } + + @Test + public void hashLinkedListMemTable() throws RocksDBException { + try(final Options options = new Options()) { + HashLinkedListMemTableConfig hashLinkedListMemTableConfig = + new HashLinkedListMemTableConfig(); + assertThat(hashLinkedListMemTableConfig.bucketCount()). + isEqualTo(50000); + hashLinkedListMemTableConfig.setBucketCount(100000); + assertThat(hashLinkedListMemTableConfig.bucketCount()). + isEqualTo(100000); + assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()). + isEqualTo(0); + hashLinkedListMemTableConfig.setHugePageTlbSize(1); + assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()). + isEqualTo(1); + assertThat(hashLinkedListMemTableConfig. + bucketEntriesLoggingThreshold()). + isEqualTo(4096); + hashLinkedListMemTableConfig. + setBucketEntriesLoggingThreshold(200); + assertThat(hashLinkedListMemTableConfig. + bucketEntriesLoggingThreshold()). + isEqualTo(200); + assertThat(hashLinkedListMemTableConfig. + ifLogBucketDistWhenFlush()).isTrue(); + hashLinkedListMemTableConfig. + setIfLogBucketDistWhenFlush(false); + assertThat(hashLinkedListMemTableConfig. + ifLogBucketDistWhenFlush()).isFalse(); + assertThat(hashLinkedListMemTableConfig. + thresholdUseSkiplist()). + isEqualTo(256); + hashLinkedListMemTableConfig.setThresholdUseSkiplist(29); + assertThat(hashLinkedListMemTableConfig. + thresholdUseSkiplist()). + isEqualTo(29); + options.setMemTableConfig(hashLinkedListMemTableConfig); + } + } + + @Test + public void vectorMemTable() throws RocksDBException { + try(final Options options = new Options()) { + VectorMemTableConfig vectorMemTableConfig = + new VectorMemTableConfig(); + assertThat(vectorMemTableConfig.reservedSize()). + isEqualTo(0); + vectorMemTableConfig.setReservedSize(123); + assertThat(vectorMemTableConfig.reservedSize()). + isEqualTo(123); + options.setMemTableConfig(vectorMemTableConfig); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java new file mode 100644 index 000000000..1bea02379 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/MemoryUtilTest.java @@ -0,0 +1,144 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
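+//
+// The tests below cross-check MemoryUtil.getApproximateMemoryUsageByType
+// against the per-DB memory properties that it is expected to aggregate.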
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.nio.charset.StandardCharsets;
+import java.util.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MemoryUtilTest {
+
+  private static final String MEMTABLE_SIZE = "rocksdb.size-all-mem-tables";
+  private static final String UNFLUSHED_MEMTABLE_SIZE = "rocksdb.cur-size-all-mem-tables";
+  private static final String TABLE_READERS = "rocksdb.estimate-table-readers-mem";
+
+  private final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8);
+  private final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8);
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule public TemporaryFolder dbFolder1 = new TemporaryFolder();
+  @Rule public TemporaryFolder dbFolder2 = new TemporaryFolder();
+
+  /**
+   * Test MemoryUtil.getApproximateMemoryUsageByType before and after a put + get
+   */
+  @Test
+  public void getApproximateMemoryUsageByType() throws RocksDBException {
+    try (final Cache cache = new LRUCache(8 * 1024 * 1024);
+         final Options options =
+             new Options()
+                 .setCreateIfMissing(true)
+                 .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache));
+         final FlushOptions flushOptions =
+             new FlushOptions().setWaitForFlush(true);
+         final RocksDB db =
+             RocksDB.open(options, dbFolder1.getRoot().getAbsolutePath())) {
+
+      final List<RocksDB> dbs = new ArrayList<>(1);
+      dbs.add(db);
+      final Set<Cache> caches = new HashSet<>(1);
+      caches.add(cache);
+      Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches);
+
+      assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(
+          db.getAggregatedLongProperty(MEMTABLE_SIZE));
+      assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(
+          db.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE));
+      assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(
+          db.getAggregatedLongProperty(TABLE_READERS));
+      // TODO(peterd): disable block cache entry stats and check for 0
+      assertThat(usage.get(MemoryUsageType.kCacheTotal)).isLessThan(1024);
+
+      db.put(key, value);
+      db.flush(flushOptions);
+      db.get(key);
+
+      usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches);
+      assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isGreaterThan(0);
+      assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(
+          db.getAggregatedLongProperty(MEMTABLE_SIZE));
+      assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isGreaterThan(0);
+      assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(
+          db.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE));
+      assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isGreaterThan(0);
+      assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(
+          db.getAggregatedLongProperty(TABLE_READERS));
+      assertThat(usage.get(MemoryUsageType.kCacheTotal)).isGreaterThan(0);
+    }
+  }
+
+  /**
+   * Test MemoryUtil.getApproximateMemoryUsageByType with null inputs
+   */
+  @Test
+  public void getApproximateMemoryUsageByTypeNulls() throws RocksDBException {
+    final Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(null, null);
+
+    assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(null);
+    assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(null);
+    assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(null);
+    assertThat(usage.get(MemoryUsageType.kCacheTotal)).isEqualTo(null);
+  }
+
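+  /*
+   * Note on the mapping exercised by these tests (as evidenced by the
+   * assertions in this file): kMemTableTotal corresponds to the
+   * "rocksdb.size-all-mem-tables" property, kMemTableUnFlushed to
+   * "rocksdb.cur-size-all-mem-tables", and kTableReadersTotal to
+   * "rocksdb.estimate-table-readers-mem", while kCacheTotal is derived from
+   * the Cache instances passed in rather than from a DB property.
+   */
+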
+  /**
+   * Test MemoryUtil.getApproximateMemoryUsageByType with two DBs and two caches
+   */
+  @Test
+  public void getApproximateMemoryUsageByTypeMultiple() throws RocksDBException {
+    try (final Cache cache1 = new LRUCache(1 * 1024 * 1024);
+         final Options options1 =
+             new Options()
+                 .setCreateIfMissing(true)
+                 .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache1));
+         final RocksDB db1 =
+             RocksDB.open(options1, dbFolder1.getRoot().getAbsolutePath());
+         final Cache cache2 = new LRUCache(1 * 1024 * 1024);
+         final Options options2 =
+             new Options()
+                 .setCreateIfMissing(true)
+                 .setTableFormatConfig(new BlockBasedTableConfig().setBlockCache(cache2));
+         final RocksDB db2 =
+             RocksDB.open(options2, dbFolder2.getRoot().getAbsolutePath());
+         final FlushOptions flushOptions =
+             new FlushOptions().setWaitForFlush(true)) {
+      final List<RocksDB> dbs = new ArrayList<>(2);
+      dbs.add(db1);
+      dbs.add(db2);
+      final Set<Cache> caches = new HashSet<>(2);
+      caches.add(cache1);
+      caches.add(cache2);
+
+      for (final RocksDB db : dbs) {
+        db.put(key, value);
+        db.flush(flushOptions);
+        db.get(key);
+      }
+
+      final Map<MemoryUsageType, Long> usage = MemoryUtil.getApproximateMemoryUsageByType(dbs, caches);
+      assertThat(usage.get(MemoryUsageType.kMemTableTotal)).isEqualTo(
+          db1.getAggregatedLongProperty(MEMTABLE_SIZE) + db2.getAggregatedLongProperty(MEMTABLE_SIZE));
+      assertThat(usage.get(MemoryUsageType.kMemTableUnFlushed)).isEqualTo(
+          db1.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE) + db2.getAggregatedLongProperty(UNFLUSHED_MEMTABLE_SIZE));
+      assertThat(usage.get(MemoryUsageType.kTableReadersTotal)).isEqualTo(
+          db1.getAggregatedLongProperty(TABLE_READERS) + db2.getAggregatedLongProperty(TABLE_READERS));
+      assertThat(usage.get(MemoryUsageType.kCacheTotal)).isGreaterThan(0);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java
new file mode 100644
index 000000000..a840eb104
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java
@@ -0,0 +1,465 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
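+//
+// The tests below exercise the two merge operators that RocksJava exposes by
+// name: "stringappend", which concatenates operands with a delimiter
+// (',' by default), and "uint64add", which sums operands interpreted as
+// 64-bit integers. The longToByteArray/longFromByteArray helpers in this file
+// use ByteOrder.LITTLE_ENDIAN to produce and read back such operands.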
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class MergeTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void stringOption()
+      throws InterruptedException, RocksDBException {
+    try (final Options opt = new Options()
+        .setCreateIfMissing(true)
+        .setMergeOperatorName("stringappend");
+        final RocksDB db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath())) {
+      // writing aa under key
+      db.put("key".getBytes(), "aa".getBytes());
+      // merge bb under key
+      db.merge("key".getBytes(), "bb".getBytes());
+
+      final byte[] value = db.get("key".getBytes());
+      final String strValue = new String(value);
+      assertThat(strValue).isEqualTo("aa,bb");
+    }
+  }
+
+  private byte[] longToByteArray(long l) {
+    ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN);
+    buf.putLong(l);
+    return buf.array();
+  }
+
+  private long longFromByteArray(byte[] a) {
+    ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / Byte.SIZE).order(ByteOrder.LITTLE_ENDIAN);
+    buf.put(a);
+    buf.flip();
+    return buf.getLong();
+  }
+
+  @Test
+  public void uint64AddOption()
+      throws InterruptedException, RocksDBException {
+    try (final Options opt = new Options()
+        .setCreateIfMissing(true)
+        .setMergeOperatorName("uint64add");
+        final RocksDB db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath())) {
+      // writing (long)100 under key
+      db.put("key".getBytes(), longToByteArray(100));
+      // merge (long)1 under key
+      db.merge("key".getBytes(), longToByteArray(1));
+
+      final byte[] value = db.get("key".getBytes());
+      final long longValue = longFromByteArray(value);
+      assertThat(longValue).isEqualTo(101);
+    }
+  }
+
+  @Test
+  public void cFStringOption()
+      throws InterruptedException, RocksDBException {
+    try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+             .setMergeOperatorName("stringappend");
+         final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+             .setMergeOperatorName("stringappend")
+    ) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt2)
+      );
+
+      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+      try (final DBOptions opt = new DBOptions()
+          .setCreateIfMissing(true)
+          .setCreateMissingColumnFamilies(true);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+              columnFamilyHandleList)) {
+        try {
+          // writing aa under key
+          db.put(columnFamilyHandleList.get(1),
+              "cfkey".getBytes(), "aa".getBytes());
+          // merge bb under key
+          db.merge(columnFamilyHandleList.get(1),
+              "cfkey".getBytes(), "bb".getBytes());
+
+          byte[] value = db.get(columnFamilyHandleList.get(1),
+              "cfkey".getBytes());
+          String strValue = new String(value);
+          assertThat(strValue).isEqualTo("aa,bb");
+        } finally {
+          for (final ColumnFamilyHandle handle : columnFamilyHandleList) {
+            handle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void cFUInt64AddOption()
+      throws InterruptedException, RocksDBException {
+    try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+             .setMergeOperatorName("uint64add");
+         final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+             .setMergeOperatorName("uint64add")
+    ) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt2)
+      );
+
+      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+      try (final DBOptions opt = new DBOptions()
+          .setCreateIfMissing(true)
+          .setCreateMissingColumnFamilies(true);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+              columnFamilyHandleList)) {
+        try {
+          // writing (long)100 under key
+          db.put(columnFamilyHandleList.get(1),
+              "cfkey".getBytes(), longToByteArray(100));
+          // merge (long)157 under key
+          db.merge(columnFamilyHandleList.get(1), "cfkey".getBytes(), longToByteArray(157));
+
+          byte[] value = db.get(columnFamilyHandleList.get(1),
+              "cfkey".getBytes());
+          long longValue = longFromByteArray(value);
+          assertThat(longValue).isEqualTo(257);
+        } finally {
+          for (final ColumnFamilyHandle handle : columnFamilyHandleList) {
+            handle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void operatorOption()
+      throws InterruptedException, RocksDBException {
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setMergeOperator(stringAppendOperator);
+         final RocksDB db = RocksDB.open(opt,
+             dbFolder.getRoot().getAbsolutePath())) {
+      // writing aa under key
+      db.put("key".getBytes(), "aa".getBytes());
+
+      // merge bb under key
+      db.merge("key".getBytes(), "bb".getBytes());
+
+      final byte[] value = db.get("key".getBytes());
+      final String strValue = new String(value);
+
+      assertThat(strValue).isEqualTo("aa,bb");
+    }
+  }
+
+  @Test
+  public void uint64AddOperatorOption()
+      throws InterruptedException, RocksDBException {
+    try (final UInt64AddOperator uint64AddOperator = new UInt64AddOperator();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setMergeOperator(uint64AddOperator);
+         final RocksDB db = RocksDB.open(opt,
+             dbFolder.getRoot().getAbsolutePath())) {
+      // writing (long)100 under key
+      db.put("key".getBytes(), longToByteArray(100));
+
+      // merge (long)1 under key
+      db.merge("key".getBytes(), longToByteArray(1));
+
+      final byte[] value = db.get("key".getBytes());
+      final long longValue = longFromByteArray(value);
+
+      assertThat(longValue).isEqualTo(101);
+    }
+  }
+
+  @Test
+  public void cFOperatorOption()
+      throws InterruptedException, RocksDBException {
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator();
+         final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+             .setMergeOperator(stringAppendOperator);
+         final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+             .setMergeOperator(stringAppendOperator)
+    ) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+          new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2)
+      );
+      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+      try (final DBOptions opt = new DBOptions()
+          .setCreateIfMissing(true)
+          .setCreateMissingColumnFamilies(true);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+              columnFamilyHandleList)
+      ) {
+        try {
+          // writing aa under key
+          db.put(columnFamilyHandleList.get(1),
+              "cfkey".getBytes(), "aa".getBytes());
+          // merge bb under key
+          db.merge(columnFamilyHandleList.get(1),
+              "cfkey".getBytes(), "bb".getBytes());
+          byte[] value = db.get(columnFamilyHandleList.get(1),
+              "cfkey".getBytes());
+          String strValue = new String(value);
+
+          // Test also with createColumnFamily
+          try (final ColumnFamilyOptions cfHandleOpts =
+                   new ColumnFamilyOptions()
+                       .setMergeOperator(stringAppendOperator);
+               final ColumnFamilyHandle cfHandle =
+                   db.createColumnFamily(
+                       new ColumnFamilyDescriptor("new_cf2".getBytes(),
+                           cfHandleOpts))
+          ) {
+            // writing xx under cfkey2
+            db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes());
+            // merge yy under cfkey2
+            db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(),
+                "yy".getBytes());
+            value = db.get(cfHandle, "cfkey2".getBytes());
+            String strValueTmpCf = new String(value);
+
+            assertThat(strValue).isEqualTo("aa,bb");
+            assertThat(strValueTmpCf).isEqualTo("xx,yy");
+          }
+        } finally {
+          for (final ColumnFamilyHandle columnFamilyHandle :
+              columnFamilyHandleList) {
+            columnFamilyHandle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void cFUInt64AddOperatorOption()
+      throws InterruptedException, RocksDBException {
+    try (final UInt64AddOperator uint64AddOperator = new UInt64AddOperator();
+         final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions()
+             .setMergeOperator(uint64AddOperator);
+         final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions()
+             .setMergeOperator(uint64AddOperator)
+    ) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1),
+          new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2)
+      );
+      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+      try (final DBOptions opt = new DBOptions()
+          .setCreateIfMissing(true)
+          .setCreateMissingColumnFamilies(true);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+              columnFamilyHandleList)
+      ) {
+        try {
+          // writing (long)100 under key
+          db.put(columnFamilyHandleList.get(1),
+              "cfkey".getBytes(), longToByteArray(100));
+          // merge (long)1 under key
+          db.merge(columnFamilyHandleList.get(1),
+              "cfkey".getBytes(), longToByteArray(1));
+          byte[] value = db.get(columnFamilyHandleList.get(1),
+              "cfkey".getBytes());
+          long longValue = longFromByteArray(value);
+
+          // Test also with createColumnFamily
+          try (final ColumnFamilyOptions cfHandleOpts =
+                   new ColumnFamilyOptions()
+                       .setMergeOperator(uint64AddOperator);
+               final ColumnFamilyHandle cfHandle =
+                   db.createColumnFamily(
+                       new ColumnFamilyDescriptor("new_cf2".getBytes(),
+                           cfHandleOpts))
+          ) {
+            // writing (long)200 under cfkey2
+            db.put(cfHandle, "cfkey2".getBytes(), longToByteArray(200));
+            // merge (long)50 under cfkey2
+            db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(),
+                longToByteArray(50));
+            value = db.get(cfHandle, "cfkey2".getBytes());
+            long longValueTmpCf = longFromByteArray(value);
+
+            assertThat(longValue).isEqualTo(101);
+            assertThat(longValueTmpCf).isEqualTo(250);
+          }
+        } finally {
+          for (final ColumnFamilyHandle columnFamilyHandle :
+              columnFamilyHandleList) {
+            columnFamilyHandle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void operatorGcBehaviour()
+      throws RocksDBException {
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator()) {
+      try (final Options opt = new Options()
+          .setCreateIfMissing(true)
+          .setMergeOperator(stringAppendOperator);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath())) {
+        //no-op
+      }
+
+      // test reuse
+      try (final Options opt = new Options()
+          .setMergeOperator(stringAppendOperator);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath())) {
+        //no-op
+      }
+
+      // test param init
+      try (final StringAppendOperator stringAppendOperator2 = new StringAppendOperator();
+           final Options opt = new Options()
+               .setMergeOperator(stringAppendOperator2);
+           final RocksDB db = RocksDB.open(opt,
+               dbFolder.getRoot().getAbsolutePath())) {
+        //no-op
+      }
+
+      // test replace one with another merge operator instance
+      try (final Options opt = new Options()
+              .setMergeOperator(stringAppendOperator);
+           final StringAppendOperator newStringAppendOperator = new StringAppendOperator()) {
+        opt.setMergeOperator(newStringAppendOperator);
+        try (final RocksDB db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath())) {
+          //no-op
+        }
+      }
+    }
+  }
+
+  @Test
+  public void uint64AddOperatorGcBehaviour()
+      throws RocksDBException {
+    try (final UInt64AddOperator uint64AddOperator = new UInt64AddOperator()) {
+      try (final Options opt = new Options()
+          .setCreateIfMissing(true)
+          .setMergeOperator(uint64AddOperator);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath())) {
+        //no-op
+      }
+
+      // test reuse
+      try (final Options opt = new Options()
+          .setMergeOperator(uint64AddOperator);
+          final RocksDB db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath())) {
+        //no-op
+      }
+
+      // test param init
+      try (final UInt64AddOperator uint64AddOperator2 = new UInt64AddOperator();
+           final Options opt = new Options()
+               .setMergeOperator(uint64AddOperator2);
+           final RocksDB db = RocksDB.open(opt,
+               dbFolder.getRoot().getAbsolutePath())) {
+        //no-op
+      }
+
+      // test replace one with another merge operator instance
+      try (final Options opt = new Options()
+              .setMergeOperator(uint64AddOperator);
+           final UInt64AddOperator newUInt64AddOperator = new UInt64AddOperator()) {
+        opt.setMergeOperator(newUInt64AddOperator);
+        try (final RocksDB db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath())) {
+          //no-op
+        }
+      }
+    }
+  }
+
+  @Test
+  public void emptyStringAsStringAppendDelimiter() throws RocksDBException {
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator("");
+         final Options opt =
+             new Options().setCreateIfMissing(true).setMergeOperator(stringAppendOperator);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      db.put("key".getBytes(), "aa".getBytes());
+      db.merge("key".getBytes(), "bb".getBytes());
+      final byte[] value = db.get("key".getBytes());
+      assertThat(new String(value)).isEqualTo("aabb");
+    }
+  }
+
+  @Test
+  public void multiCharStringAsStringAppendDelimiter() throws RocksDBException {
+    try (final StringAppendOperator stringAppendOperator = new StringAppendOperator("<>");
+         final Options opt =
+             new Options().setCreateIfMissing(true).setMergeOperator(stringAppendOperator);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      db.put("key".getBytes(), "aa".getBytes());
+      db.merge("key".getBytes(), "bb".getBytes());
+      final byte[] value = db.get("key".getBytes());
+      assertThat(new String(value)).isEqualTo("aa<>bb");
+    }
+  }
+
+  @Test
+  public void emptyStringInSetMergeOperatorByName() {
+    try (final Options opt = new Options()
+        .setMergeOperatorName("");
+        final ColumnFamilyOptions cOpt = new ColumnFamilyOptions()
+            .setMergeOperatorName("")) {
+      //no-op
+    }
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void nullStringInSetMergeOperatorByNameOptions() {
+    try (final Options opt = new Options()) {
+      opt.setMergeOperatorName(null);
+    }
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void nullStringInSetMergeOperatorByNameColumnFamilyOptions() {
+    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+      opt.setMergeOperatorName(null);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java
new file mode 100644
index 000000000..10c92d49d
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java
@@ -0,0 +1,55 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class MixedOptionsTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Test
+  public void mixedOptionsTest() {
+    // Set a table factory and check the names
+    try (final Filter bloomFilter = new BloomFilter();
+         final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
+             .setTableFormatConfig(
+                 new BlockBasedTableConfig().setFilterPolicy(bloomFilter))
+    ) {
+      assertThat(cfOptions.tableFactoryName()).isEqualTo(
+          "BlockBasedTable");
+      cfOptions.setTableFormatConfig(new PlainTableConfig());
+      assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable");
+      // Initialize a dbOptions object from cf options and db options
+      try (final DBOptions dbOptions = new DBOptions();
+           final Options options = new Options(dbOptions, cfOptions)) {
+        assertThat(options.tableFactoryName()).isEqualTo("PlainTable");
+        // Free instances
+      }
+    }
+
+    // Test Optimize for statements
+    try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
+      cfOptions.optimizeUniversalStyleCompaction();
+      cfOptions.optimizeLevelStyleCompaction();
+      cfOptions.optimizeForPointLookup(1024);
+      try (final Options options = new Options()) {
+        options.optimizeLevelStyleCompaction();
+        options.optimizeLevelStyleCompaction(400);
+        options.optimizeUniversalStyleCompaction();
+        options.optimizeUniversalStyleCompaction(400);
+        options.optimizeForPointLookup(1024);
+        options.prepareForBulkLoad();
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java
new file mode 100644
index 000000000..cdfd9d3a9
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MultiColumnRegressionTest.java
@@ -0,0 +1,146 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+//
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Test for changes made by the fix for the transactional multiGet problem;
+ * the tests here were previously broken by the behaviour removed by that change.
+ */
+@RunWith(Parameterized.class)
+public class MultiColumnRegressionTest {
+  @Parameterized.Parameters
+  public static List<Params> data() {
+    return Arrays.asList(new Params(3, 100), new Params(3, 1000000));
+  }
+
+  public static class Params {
+    final int numColumns;
+    final int keySize;
+
+    public Params(final int numColumns, final int keySize) {
+      this.numColumns = numColumns;
+      this.keySize = keySize;
+    }
+  }
+
+  @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  private final Params params;
+
+  public MultiColumnRegressionTest(final Params params) {
+    this.params = params;
+  }
+
+  @Test
+  public void transactionDB() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
+    for (int i = 0; i < params.numColumns; i++) {
+      final StringBuilder sb = new StringBuilder();
+      sb.append("cf" + i);
+      for (int j = 0; j < params.keySize; j++) sb.append("_cf");
+      columnFamilyDescriptors.add(new ColumnFamilyDescriptor(sb.toString().getBytes()));
+    }
+    try (final Options opt = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      final List<ColumnFamilyHandle> columnFamilyHandles =
+          db.createColumnFamilies(columnFamilyDescriptors);
+    }
+
+    columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+    try (final TransactionDB tdb = TransactionDB.open(new DBOptions().setCreateIfMissing(true),
+             new TransactionDBOptions(), dbFolder.getRoot().getAbsolutePath(),
+             columnFamilyDescriptors, columnFamilyHandles)) {
+      final WriteOptions writeOptions = new WriteOptions();
+      try (Transaction transaction = tdb.beginTransaction(writeOptions)) {
+        for (int i = 0; i < params.numColumns; i++) {
+          transaction.put(
+              columnFamilyHandles.get(i), ("key" + i).getBytes(), ("value" + (i - 7)).getBytes());
+        }
+        transaction.put("key".getBytes(), "value".getBytes());
+        transaction.commit();
+      }
+      for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+        columnFamilyHandle.close();
+      }
+    }
+
+    final List<ColumnFamilyHandle> columnFamilyHandles2 = new ArrayList<>();
+    try (final TransactionDB tdb = TransactionDB.open(new DBOptions().setCreateIfMissing(true),
+             new TransactionDBOptions(), dbFolder.getRoot().getAbsolutePath(),
+             columnFamilyDescriptors, columnFamilyHandles2)) {
+      try (Transaction transaction = tdb.beginTransaction(new WriteOptions())) {
+        final ReadOptions readOptions = new ReadOptions();
+        for (int i = 0; i < params.numColumns; i++) {
+          final byte[] value =
+              transaction.get(columnFamilyHandles2.get(i), readOptions, ("key" + i).getBytes());
+          assertThat(value).isEqualTo(("value" + (i - 7)).getBytes());
+        }
+        transaction.commit();
+      }
+      for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles2) {
+        columnFamilyHandle.close();
+      }
+    }
+  }
+
+  @Test
+  public void optimisticDB() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
+    for (int i = 0; i < params.numColumns; i++) {
+      columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
+    }
+
+    columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+    try (final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
+             new DBOptions().setCreateIfMissing(true), dbFolder.getRoot().getAbsolutePath(),
+             columnFamilyDescriptors, columnFamilyHandles)) {
+      try (Transaction transaction = otdb.beginTransaction(new WriteOptions())) {
+        for (int i = 0; i < params.numColumns; i++) {
+          transaction.put(
+              columnFamilyHandles.get(i), ("key" + i).getBytes(), ("value" + (i - 7)).getBytes());
+        }
+        transaction.put("key".getBytes(), "value".getBytes());
+        transaction.commit();
+      }
+      for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+        columnFamilyHandle.close();
+      }
+    }
+
+    final List<ColumnFamilyHandle> columnFamilyHandles2 = new ArrayList<>();
+    try (final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(
+             new DBOptions().setCreateIfMissing(true), dbFolder.getRoot().getAbsolutePath(),
+             columnFamilyDescriptors, columnFamilyHandles2)) {
+      try (Transaction transaction = otdb.beginTransaction(new WriteOptions())) {
+        final ReadOptions readOptions = new ReadOptions();
+        for (int i = 0; i < params.numColumns; i++) {
+          final byte[] value =
+              transaction.get(columnFamilyHandles2.get(i), readOptions, ("key" + i).getBytes());
+          assertThat(value).isEqualTo(("value" + (i - 7)).getBytes());
+        }
+        transaction.commit();
+      }
+      for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles2) {
+        columnFamilyHandle.close();
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java
new file mode 100644
index 000000000..90a13e1da
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MultiGetManyKeysTest.java
@@ -0,0 +1,241 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.*;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+@RunWith(Parameterized.class)
+public class MultiGetManyKeysTest {
+  @Parameterized.Parameters
+  public static List<Integer> data() {
+    return Arrays.asList(2, 3, 250, 60000, 70000, 150000, 750000);
+  }
+
+  @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  private final int numKeys;
+
+  public MultiGetManyKeysTest(final Integer numKeys) {
+    this.numKeys = numKeys;
+  }
+
+  /**
+   * Test for the multiGet problem with many keys
+   */
+  @Test
+  public void multiGetAsListLarge() throws RocksDBException {
+    final List<byte[]> keys = generateRandomKeys(numKeys);
+    final Map<Key, byte[]> keyValues = generateRandomKeyValues(keys, 10);
+    putKeysAndValues(keyValues);
+
+    try (final Options opt = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      final List<byte[]> values = db.multiGetAsList(keys);
+      assertKeysAndValues(keys, keyValues, values);
+    }
+  }
+
+  /**
+   * Test for the transactional multiGet problem with many keys
+   */
+  @Test
+  public void multiGetAsListLargeTransactional() throws RocksDBException {
+    final List<byte[]> keys = generateRandomKeys(numKeys);
+    final Map<Key, byte[]> keyValues = generateRandomKeyValues(keys, 10);
+    putKeysAndValues(keyValues);
+
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB txnDB =
+             TransactionDB.open(options, txnDbOptions, dbFolder.getRoot().getAbsolutePath())) {
+      try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) {
+        final List<byte[]> values = transaction.multiGetAsList(new ReadOptions(), keys);
+        assertKeysAndValues(keys, keyValues, values);
+      }
+    }
+  }
+
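+  /*
+   * The multiGetForUpdate variants below follow the same pattern as the
+   * multiGet tests above, but additionally route each key through the
+   * transaction's getForUpdate path, so they also stress key tracking and
+   * conflict detection for large key counts.
+   */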
+  /**
+   * Test for the transactional multiGetForUpdate problem with many keys
+   */
+  @Test
+  public void multiGetForUpdateAsListLargeTransactional() throws RocksDBException {
+    final List<byte[]> keys = generateRandomKeys(numKeys);
+    final Map<Key, byte[]> keyValues = generateRandomKeyValues(keys, 10);
+    putKeysAndValues(keyValues);
+
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB txnDB =
+             TransactionDB.open(options, txnDbOptions, dbFolder.getRoot().getAbsolutePath())) {
+      try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) {
+        final List<byte[]> values = transaction.multiGetForUpdateAsList(new ReadOptions(), keys);
+        assertKeysAndValues(keys, keyValues, values);
+      }
+    }
+  }
+
+  /**
+   * Test for the transactional multiGet problem with many keys and a column family
+   */
+  @Test
+  public void multiGetAsListLargeTransactionalCF() throws RocksDBException {
+    final List<byte[]> keys = generateRandomKeys(numKeys);
+    final Map<Key, byte[]> keyValues = generateRandomKeyValues(keys, 10);
+    final ColumnFamilyDescriptor columnFamilyDescriptor =
+        new ColumnFamilyDescriptor("cfTest".getBytes());
+    putKeysAndValues(columnFamilyDescriptor, keyValues);
+
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
+    columnFamilyDescriptors.add(columnFamilyDescriptor);
+    columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB txnDB = TransactionDB.open(new DBOptions(options), txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) {
+      final List<ColumnFamilyHandle> columnFamilyHandlesForMultiGet = new ArrayList<>(numKeys);
+      for (int i = 0; i < numKeys; i++)
+        columnFamilyHandlesForMultiGet.add(columnFamilyHandles.get(0));
+      try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) {
+        final List<byte[]> values =
+            transaction.multiGetAsList(new ReadOptions(), columnFamilyHandlesForMultiGet, keys);
+        assertKeysAndValues(keys, keyValues, values);
+      }
+      for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+        columnFamilyHandle.close();
+      }
+    }
+  }
+
+  /**
+   * Test for the transactional multiGetForUpdate problem with many keys and a column family
+   */
+  @Test
+  public void multiGetForUpdateAsListLargeTransactionalCF() throws RocksDBException {
+    final List<byte[]> keys = generateRandomKeys(numKeys);
+    final Map<Key, byte[]> keyValues = generateRandomKeyValues(keys, 10);
+    final ColumnFamilyDescriptor columnFamilyDescriptor =
+        new ColumnFamilyDescriptor("cfTest".getBytes());
+    putKeysAndValues(columnFamilyDescriptor, keyValues);
+
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
+    columnFamilyDescriptors.add(columnFamilyDescriptor);
+    columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB txnDB = TransactionDB.open(new DBOptions(options), txnDbOptions,
+             dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) {
+      final List<ColumnFamilyHandle> columnFamilyHandlesForMultiGet = new ArrayList<>(numKeys);
+      for (int i = 0; i < numKeys; i++)
+        columnFamilyHandlesForMultiGet.add(columnFamilyHandles.get(0));
+      try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) {
+        final List<byte[]> values = transaction.multiGetForUpdateAsList(
+            new ReadOptions(), columnFamilyHandlesForMultiGet, keys);
+        assertKeysAndValues(keys, keyValues, values);
+      }
+      for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
+        columnFamilyHandle.close();
+      }
+    }
+  }
+
+  private List<byte[]> generateRandomKeys(final int numKeys) {
+    final Random rand = new Random();
+    final List<byte[]> keys = new ArrayList<>();
+    for (int i = 0; i < numKeys; i++) {
+      final byte[] key = new byte[4];
+      rand.nextBytes(key);
+      keys.add(key);
+    }
+    return keys;
+  }
+
+  private Map<Key, byte[]> generateRandomKeyValues(final List<byte[]> keys, final int percent) {
+    final Random rand = new Random();
+    final Map<Key, byte[]> keyValues = new HashMap<>();
+    for (int i = 0; i < numKeys; i++) {
+      if (rand.nextInt(100) < percent) {
+        final byte[] value = new byte[1024];
+        rand.nextBytes(value);
+        keyValues.put(new Key(keys.get(i)), value);
+      }
+    }
+    return keyValues;
+  }
+
+  private void putKeysAndValues(final Map<Key, byte[]> keyValues) throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+      for (final Map.Entry<Key, byte[]> keyValue : keyValues.entrySet()) {
+        db.put(keyValue.getKey().get(), keyValue.getValue());
+      }
+    }
+  }
+
+  private void putKeysAndValues(final ColumnFamilyDescriptor columnFamilyDescriptor,
+      final Map<Key, byte[]> keyValues) throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyHandle columnFamilyHandle =
+             db.createColumnFamily(columnFamilyDescriptor)) {
+      for (final Map.Entry<Key, byte[]> keyValue : keyValues.entrySet()) {
+        db.put(columnFamilyHandle, keyValue.getKey().get(), keyValue.getValue());
+      }
+    }
+  }
+
+  private void assertKeysAndValues(
+      final List<byte[]> keys, final Map<Key, byte[]> keyValues, final List<byte[]> values) {
+    assertThat(values.size()).isEqualTo(keys.size());
+    for (int i = 0; i < numKeys; i++) {
+      final Key key = new Key(keys.get(i));
+      final byte[] value = values.get(i);
+      if (keyValues.containsKey(key)) {
+        assertThat(value).isEqualTo(keyValues.get(key));
+      } else {
+        assertThat(value).isNull();
+      }
+    }
+  }
+
+  static private class Key {
+    private final byte[] bytes;
+    public Key(final byte[] bytes) {
+      this.bytes = bytes;
+    }
+
+    public byte[] get() {
+      return this.bytes;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+      if (this == o)
+        return true;
+      if (o == null || getClass() != o.getClass())
+        return false;
+      final Key key = (Key) o;
+      return Arrays.equals(bytes, key.bytes);
+    }
+
+    @Override
+    public int hashCode() {
+      return Arrays.hashCode(bytes);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MultiGetTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MultiGetTest.java
new file mode 100644
index 000000000..323a6b1f4
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/MultiGetTest.java
@@ -0,0 +1,525 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
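+//
+// The tests below drive RocksDB#multiGetByteBuffers, which works on direct
+// ByteBuffers for both keys and values: each returned ByteBufferGetStatus
+// carries the per-key Status, the requiredSize of the complete value, and a
+// value buffer that holds at most a truncated prefix when the supplied buffer
+// is smaller than the stored value (see the ShortValueBuffers and TruncateCF
+// tests below).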
+package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.TestUtil; + +public class MultiGetTest { + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void putNThenMultiGet() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1ForKey1".getBytes()); + db.put("key2".getBytes(), "value2ForKey2".getBytes()); + db.put("key3".getBytes(), "value3ForKey3".getBytes()); + final List keys = + Arrays.asList("key1".getBytes(), "key2".getBytes(), "key3".getBytes()); + final List values = db.multiGetAsList(keys); + assertThat(values.size()).isEqualTo(keys.size()); + assertThat(values.get(0)).isEqualTo("value1ForKey1".getBytes()); + assertThat(values.get(1)).isEqualTo("value2ForKey2".getBytes()); + assertThat(values.get(2)).isEqualTo("value3ForKey3".getBytes()); + } + } + + @Test + public void putNThenMultiGetDirect() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1ForKey1".getBytes()); + db.put("key2".getBytes(), "value2ForKey2".getBytes()); + db.put("key3".getBytes(), "value3ForKey3".getBytes()); + + final List keys = new ArrayList<>(); + keys.add(ByteBuffer.allocateDirect(12).put("key1".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes())); + // Java8 and lower flip() returns Buffer not ByteBuffer, so can't chain above /\/\ + for (final ByteBuffer key : keys) { + key.flip(); + } + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + values.add(ByteBuffer.allocateDirect(24)); + } + + { + final List results = db.multiGetByteBuffers(keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok); + + assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length); + assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(0).value)) + .isEqualTo("value1ForKey1".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(1).value)) + .isEqualTo("value2ForKey2".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(2).value)) + .isEqualTo("value3ForKey3".getBytes()); + } + + { + final List results = + db.multiGetByteBuffers(new ReadOptions(), keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok); + + assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length); + assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + 
assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(0).value)) + .isEqualTo("value1ForKey1".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(1).value)) + .isEqualTo("value2ForKey2".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(2).value)) + .isEqualTo("value3ForKey3".getBytes()); + } + } + } + + @Test + public void putNThenMultiGetDirectSliced() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1ForKey1".getBytes()); + db.put("key2".getBytes(), "value2ForKey2".getBytes()); + db.put("key3".getBytes(), "value3ForKey3".getBytes()); + + final List keys = new ArrayList<>(); + keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes())); + keys.add( + ByteBuffer.allocateDirect(12).put("prefix1".getBytes()).slice().put("key1".getBytes())); + // Java8 and lower flip() returns Buffer not ByteBuffer, so can't chain above /\/\ + for (final ByteBuffer key : keys) { + key.flip(); + } + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + values.add(ByteBuffer.allocateDirect(24)); + } + + { + final List results = db.multiGetByteBuffers(keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok); + + assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length); + assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(0).value)) + .isEqualTo("value2ForKey2".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(1).value)) + .isEqualTo("value3ForKey3".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(2).value)) + .isEqualTo("value1ForKey1".getBytes()); + } + } + } + + @Test + public void putNThenMultiGetDirectBadValuesArray() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1ForKey1".getBytes()); + db.put("key2".getBytes(), "value2ForKey2".getBytes()); + db.put("key3".getBytes(), "value3ForKey3".getBytes()); + + final List keys = new ArrayList<>(); + keys.add(ByteBuffer.allocateDirect(12).put("key1".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes())); + // Java8 and lower flip() returns Buffer not ByteBuffer, so can't chain above /\/\ + for (final ByteBuffer key : keys) { + key.flip(); + } + + { + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + values.add(ByteBuffer.allocateDirect(24)); + } + + values.remove(0); + + try { + db.multiGetByteBuffers(keys, values); + fail("Expected exception when not enough value ByteBuffers supplied"); + } catch (final IllegalArgumentException e) { + assertThat(e.getMessage()).contains("For each key there must be a corresponding value"); + } + } + + { + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + 
values.add(ByteBuffer.allocateDirect(24)); + } + + values.add(ByteBuffer.allocateDirect(24)); + + try { + db.multiGetByteBuffers(keys, values); + fail("Expected exception when too many value ByteBuffers supplied"); + } catch (final IllegalArgumentException e) { + assertThat(e.getMessage()).contains("For each key there must be a corresponding value"); + } + } + } + } + + @Test + public void putNThenMultiGetDirectShortValueBuffers() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1ForKey1".getBytes()); + db.put("key2".getBytes(), "value2ForKey2".getBytes()); + db.put("key3".getBytes(), "value3ForKey3".getBytes()); + + final List keys = new ArrayList<>(); + keys.add(ByteBuffer.allocateDirect(12).put("key1".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes())); + // Java8 and lower flip() returns Buffer not ByteBuffer, so can't chain above /\/\ + for (final ByteBuffer key : keys) { + key.flip(); + } + + { + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + values.add(ByteBuffer.allocateDirect(4)); + } + + final List statii = db.multiGetByteBuffers(keys, values); + assertThat(statii.size()).isEqualTo(values.size()); + for (final ByteBufferGetStatus status : statii) { + assertThat(status.status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(status.requiredSize).isEqualTo("value3ForKey3".getBytes().length); + final ByteBuffer expected = + ByteBuffer.allocateDirect(24).put(Arrays.copyOf("valueX".getBytes(), 4)); + expected.flip(); + assertThat(status.value).isEqualTo(expected); + } + } + } + } + + @Test + public void putNThenMultiGetDirectNondefaultCF() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + final List cfDescriptors = new ArrayList<>(0); + cfDescriptors.add(new ColumnFamilyDescriptor("cf0".getBytes())); + cfDescriptors.add(new ColumnFamilyDescriptor("cf1".getBytes())); + cfDescriptors.add(new ColumnFamilyDescriptor("cf2".getBytes())); + + final List cf = db.createColumnFamilies(cfDescriptors); + + db.put(cf.get(0), "key1".getBytes(), "value1ForKey1".getBytes()); + db.put(cf.get(0), "key2".getBytes(), "value2ForKey2".getBytes()); + db.put(cf.get(0), "key3".getBytes(), "value3ForKey3".getBytes()); + + final List keys = new ArrayList<>(); + keys.add(ByteBuffer.allocateDirect(12).put("key1".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes())); + // Java8 and lower flip() returns Buffer not ByteBuffer, so can't chain above /\/\ + for (final ByteBuffer key : keys) { + key.flip(); + } + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + values.add(ByteBuffer.allocateDirect(24)); + } + + { + final List results = db.multiGetByteBuffers(keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.NotFound); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.NotFound); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.NotFound); + } + + { + final List columnFamilyHandles = new ArrayList<>(); + columnFamilyHandles.add(cf.get(0)); + final List results = + db.multiGetByteBuffers(columnFamilyHandles, keys, values); + 
+ assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok); + + assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length); + assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(0).value)) + .isEqualTo("value1ForKey1".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(1).value)) + .isEqualTo("value2ForKey2".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(2).value)) + .isEqualTo("value3ForKey3".getBytes()); + } + + { + final List columnFamilyHandles = new ArrayList<>(); + columnFamilyHandles.add(cf.get(0)); + columnFamilyHandles.add(cf.get(0)); + columnFamilyHandles.add(cf.get(0)); + final List results = + db.multiGetByteBuffers(columnFamilyHandles, keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok); + + assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length); + assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(0).value)) + .isEqualTo("value1ForKey1".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(1).value)) + .isEqualTo("value2ForKey2".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(2).value)) + .isEqualTo("value3ForKey3".getBytes()); + } + } + } + + @Test + public void putNThenMultiGetDirectCFParams() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1ForKey1".getBytes()); + db.put("key2".getBytes(), "value2ForKey2".getBytes()); + db.put("key3".getBytes(), "value3ForKey3".getBytes()); + + final List columnFamilyHandles = new ArrayList<>(); + columnFamilyHandles.add(db.getDefaultColumnFamily()); + columnFamilyHandles.add(db.getDefaultColumnFamily()); + + final List keys = new ArrayList<>(); + keys.add(ByteBuffer.allocateDirect(12).put("key1".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes())); + // Java8 and lower flip() returns Buffer not ByteBuffer, so can't chain above /\/\ + for (final ByteBuffer key : keys) { + key.flip(); + } + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + values.add(ByteBuffer.allocateDirect(24)); + } + try { + db.multiGetByteBuffers(columnFamilyHandles, keys, values); + fail("Expected exception when 2 column families supplied"); + } catch (final IllegalArgumentException e) { + assertThat(e.getMessage()).contains("Wrong number of ColumnFamilyHandle(s) supplied"); + } + + columnFamilyHandles.clear(); + columnFamilyHandles.add(db.getDefaultColumnFamily()); + final List results = + db.multiGetByteBuffers(columnFamilyHandles, keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok); + 
assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok); + + assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length); + assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(0).value)).isEqualTo("value1ForKey1".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(1).value)).isEqualTo("value2ForKey2".getBytes()); + assertThat(TestUtil.bufferBytes(results.get(2).value)).isEqualTo("value3ForKey3".getBytes()); + } + } + + @Test + public void putNThenMultiGetDirectMixedCF() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + final List cfDescriptors = new ArrayList<>(); + cfDescriptors.add(new ColumnFamilyDescriptor("cf0".getBytes())); + cfDescriptors.add(new ColumnFamilyDescriptor("cf1".getBytes())); + cfDescriptors.add(new ColumnFamilyDescriptor("cf2".getBytes())); + cfDescriptors.add(new ColumnFamilyDescriptor("cf3".getBytes())); + + final List cf = db.createColumnFamilies(cfDescriptors); + + db.put(cf.get(1), "key1".getBytes(), "value1ForKey1".getBytes()); + db.put("key2".getBytes(), "value2ForKey2".getBytes()); + db.put(cf.get(3), "key3".getBytes(), "value3ForKey3".getBytes()); + + final List keys = new ArrayList<>(); + keys.add(ByteBuffer.allocateDirect(12).put("key1".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes())); + keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes())); + // Java8 and lower flip() returns Buffer not ByteBuffer, so can't chain above /\/\ + for (final ByteBuffer key : keys) { + key.flip(); + } + final List values = new ArrayList<>(); + for (int i = 0; i < keys.size(); i++) { + values.add(ByteBuffer.allocateDirect(24)); + } + + { + final List columnFamilyHandles = new ArrayList<>(); + columnFamilyHandles.add(db.getDefaultColumnFamily()); + + final List results = + db.multiGetByteBuffers(columnFamilyHandles, keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.NotFound); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.NotFound); + + assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(1).value)) + .isEqualTo("value2ForKey2".getBytes()); + } + + { + final List columnFamilyHandles = new ArrayList<>(); + columnFamilyHandles.add(cf.get(1)); + + final List results = + db.multiGetByteBuffers(columnFamilyHandles, keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.NotFound); + assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.NotFound); + + assertThat(results.get(0).requiredSize).isEqualTo("value2ForKey2".getBytes().length); + + assertThat(TestUtil.bufferBytes(results.get(0).value)) + .isEqualTo("value1ForKey1".getBytes()); + } + + { + final List columnFamilyHandles = new ArrayList<>(); + columnFamilyHandles.add(cf.get(1)); + columnFamilyHandles.add(db.getDefaultColumnFamily()); + columnFamilyHandles.add(cf.get(3)); + + final List results = + db.multiGetByteBuffers(columnFamilyHandles, keys, values); + + assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok); + 
+        assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok);
+        assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok);
+
+        assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length);
+        assertThat(results.get(1).requiredSize).isEqualTo("value2ForKey2".getBytes().length);
+        assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length);
+
+        assertThat(TestUtil.bufferBytes(results.get(0).value))
+            .isEqualTo("value1ForKey1".getBytes());
+        assertThat(TestUtil.bufferBytes(results.get(1).value))
+            .isEqualTo("value2ForKey2".getBytes());
+        assertThat(TestUtil.bufferBytes(results.get(2).value))
+            .isEqualTo("value3ForKey3".getBytes());
+      }
+
+      {
+        final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+        columnFamilyHandles.add(db.getDefaultColumnFamily());
+        columnFamilyHandles.add(cf.get(1));
+        columnFamilyHandles.add(cf.get(3));
+
+        final List<ByteBufferGetStatus> results =
+            db.multiGetByteBuffers(columnFamilyHandles, keys, values);
+
+        assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.NotFound);
+        assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.NotFound);
+        assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok);
+
+        assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length);
+
+        assertThat(TestUtil.bufferBytes(results.get(2).value))
+            .isEqualTo("value3ForKey3".getBytes());
+      }
+    }
+  }
+
+  @Test
+  public void putNThenMultiGetDirectTruncateCF() throws RocksDBException {
+    try (final Options opt = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+      cfDescriptors.add(new ColumnFamilyDescriptor("cf0".getBytes()));
+
+      final List<ColumnFamilyHandle> cf = db.createColumnFamilies(cfDescriptors);
+
+      db.put(cf.get(0), "key1".getBytes(), "value1ForKey1".getBytes());
+      db.put(cf.get(0), "key2".getBytes(), "value2ForKey2WithLotsOfTrailingGarbage".getBytes());
+      db.put(cf.get(0), "key3".getBytes(), "value3ForKey3".getBytes());
+
+      final List<ByteBuffer> keys = new ArrayList<>();
+      keys.add(ByteBuffer.allocateDirect(12).put("key1".getBytes()));
+      keys.add(ByteBuffer.allocateDirect(12).put("key2".getBytes()));
+      keys.add(ByteBuffer.allocateDirect(12).put("key3".getBytes()));
+      // On Java 8 and earlier, flip() returns Buffer rather than ByteBuffer,
+      // so it cannot be chained onto the allocateDirect().put() calls above.
+      for (final ByteBuffer key : keys) {
+        key.flip();
+      }
+      final List<ByteBuffer> values = new ArrayList<>();
+      for (int i = 0; i < keys.size(); i++) {
+        values.add(ByteBuffer.allocateDirect(24));
+      }
+
+      {
+        final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+        columnFamilyHandles.add(cf.get(0));
+        final List<ByteBufferGetStatus> results =
+            db.multiGetByteBuffers(columnFamilyHandles, keys, values);
+
+        assertThat(results.get(0).status.getCode()).isEqualTo(Status.Code.Ok);
+        assertThat(results.get(1).status.getCode()).isEqualTo(Status.Code.Ok);
+        assertThat(results.get(2).status.getCode()).isEqualTo(Status.Code.Ok);
+
+        assertThat(results.get(0).requiredSize).isEqualTo("value1ForKey1".getBytes().length);
+        assertThat(results.get(1).requiredSize)
+            .isEqualTo("value2ForKey2WithLotsOfTrailingGarbage".getBytes().length);
+        assertThat(results.get(2).requiredSize).isEqualTo("value3ForKey3".getBytes().length);
+
+        assertThat(TestUtil.bufferBytes(results.get(0).value))
+            .isEqualTo("value1ForKey1".getBytes());
+        assertThat(TestUtil.bufferBytes(results.get(1).value))
+            .isEqualTo("valu e2Fo rKey 2Wit hLot sOfT".replace(" ", "").getBytes());
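+        // Editor's note (illustrative sketch, not part of the upstream test):
+        // requiredSize always reports the full length of the stored value, so
+        // a caller can detect that the 24-byte buffer truncated the value and
+        // allocate a right-sized buffer for a retry of the read:
+        if (results.get(1).requiredSize > values.get(1).capacity()) {
+          final ByteBuffer bigger = ByteBuffer.allocateDirect(results.get(1).requiredSize);
+          assertThat(bigger.capacity())
+              .isEqualTo("value2ForKey2WithLotsOfTrailingGarbage".getBytes().length);
+        }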
assertThat(TestUtil.bufferBytes(results.get(2).value)) + .isEqualTo("value3ForKey3".getBytes()); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java new file mode 100644 index 000000000..b2b2599a7 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/MutableColumnFamilyOptionsTest.java @@ -0,0 +1,167 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import org.junit.Test; +import org.rocksdb.MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder; + +import java.util.NoSuchElementException; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MutableColumnFamilyOptionsTest { + + @Test + public void builder() { + final MutableColumnFamilyOptionsBuilder builder = + MutableColumnFamilyOptions.builder(); + builder + .setWriteBufferSize(10) + .setInplaceUpdateNumLocks(5) + .setDisableAutoCompactions(true) + .setParanoidFileChecks(true); + + assertThat(builder.writeBufferSize()).isEqualTo(10); + assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5); + assertThat(builder.disableAutoCompactions()).isEqualTo(true); + assertThat(builder.paranoidFileChecks()).isEqualTo(true); + } + + @Test(expected = NoSuchElementException.class) + public void builder_getWhenNotSet() { + final MutableColumnFamilyOptionsBuilder builder = + MutableColumnFamilyOptions.builder(); + + builder.writeBufferSize(); + } + + @Test + public void builder_build() { + final MutableColumnFamilyOptions options = MutableColumnFamilyOptions + .builder() + .setWriteBufferSize(10) + .setParanoidFileChecks(true) + .build(); + + assertThat(options.getKeys().length).isEqualTo(2); + assertThat(options.getValues().length).isEqualTo(2); + assertThat(options.getKeys()[0]) + .isEqualTo( + MutableColumnFamilyOptions.MemtableOption.write_buffer_size.name()); + assertThat(options.getValues()[0]).isEqualTo("10"); + assertThat(options.getKeys()[1]) + .isEqualTo( + MutableColumnFamilyOptions.MiscOption.paranoid_file_checks.name()); + assertThat(options.getValues()[1]).isEqualTo("true"); + } + + @Test + public void mutableColumnFamilyOptions_toString() { + final String str = MutableColumnFamilyOptions.builder() + .setWriteBufferSize(10) + .setInplaceUpdateNumLocks(5) + .setDisableAutoCompactions(true) + .setParanoidFileChecks(true) + .setMaxBytesForLevelMultiplierAdditional(new int[] {2, 3, 5, 7, 11, 13}) + .build() + .toString(); + + assertThat(str).isEqualTo("write_buffer_size=10;inplace_update_num_locks=5;" + + "disable_auto_compactions=true;paranoid_file_checks=true;max_bytes_for_level_multiplier_additional=2:3:5:7:11:13"); + } + + @Test + public void mutableColumnFamilyOptions_parse() { + final String str = "write_buffer_size=10;inplace_update_num_locks=5;" + + "disable_auto_compactions=true;paranoid_file_checks=true;max_bytes_for_level_multiplier_additional=2:{3}:{5}:{7}:{11}:{13}"; + + final MutableColumnFamilyOptionsBuilder builder = + MutableColumnFamilyOptions.parse(str); + + assertThat(builder.writeBufferSize()).isEqualTo(10); + assertThat(builder.inplaceUpdateNumLocks()).isEqualTo(5); + assertThat(builder.disableAutoCompactions()).isEqualTo(true); + assertThat(builder.paranoidFileChecks()).isEqualTo(true); + 
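+    // Editor's note (illustrative sketch, not part of the upstream test): the
+    // string emitted by build().toString() uses the same key=value;... format
+    // that parse() accepts, so parsed options should survive a text round-trip:
+    final MutableColumnFamilyOptionsBuilder reparsed =
+        MutableColumnFamilyOptions.parse(builder.build().toString());
+    assertThat(reparsed.writeBufferSize()).isEqualTo(10);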
assertThat(builder.maxBytesForLevelMultiplierAdditional()) + .isEqualTo(new int[] {2, 3, 5, 7, 11, 13}); + } + + /** + * Extended parsing test to deal with all the options which C++ may return. + * We have canned a set of options returned by {RocksDB#getOptions} + */ + @Test + public void mutableColumnFamilyOptions_parse_getOptions_output() { + final String optionsString = + "bottommost_compression=kDisableCompressionOption; sample_for_compression=0; " + + "blob_garbage_collection_age_cutoff=0.250000; blob_garbage_collection_force_threshold=0.800000;" + + "arena_block_size=1048576; enable_blob_garbage_collection=false; level0_stop_writes_trigger=36; min_blob_size=65536;" + + "blob_compaction_readahead_size=262144; blob_file_starting_level=5; prepopulate_blob_cache=kDisable;" + + "compaction_options_universal={allow_trivial_move=false;stop_style=kCompactionStopStyleTotalSize;min_merge_width=2;" + + "compression_size_percent=-1;max_size_amplification_percent=200;max_merge_width=4294967295;size_ratio=1;}; " + + "target_file_size_base=67108864; max_bytes_for_level_base=268435456; memtable_whole_key_filtering=false; " + + "soft_pending_compaction_bytes_limit=68719476736; blob_compression_type=kNoCompression; max_write_buffer_number=2; " + + "ttl=2592000; compaction_options_fifo={allow_compaction=false;age_for_warm=0;max_table_files_size=1073741824;}; " + + "check_flush_compaction_key_order=true; max_successive_merges=0; inplace_update_num_locks=10000; " + + "bottommost_compression_opts={enabled=false;parallel_threads=1;zstd_max_train_bytes=0;max_dict_bytes=0;" + + "strategy=0;max_dict_buffer_bytes=0;level=32767;window_bits=-14;}; " + + "target_file_size_multiplier=1; max_bytes_for_level_multiplier_additional=5:{7}:{9}:{11}:{13}:{15}:{17}; " + + "enable_blob_files=true; level0_slowdown_writes_trigger=20; compression=kLZ4HCCompression; level0_file_num_compaction_trigger=4; " + + "blob_file_size=268435456; prefix_extractor=nullptr; max_bytes_for_level_multiplier=10.000000; write_buffer_size=67108864; " + + "disable_auto_compactions=false; max_compaction_bytes=1677721600; memtable_huge_page_size=0; " + + "compression_opts={enabled=false;parallel_threads=1;zstd_max_train_bytes=0;max_dict_bytes=0;strategy=0;max_dict_buffer_bytes=0;" + + "level=32767;window_bits=-14;}; " + + "hard_pending_compaction_bytes_limit=274877906944; periodic_compaction_seconds=0; paranoid_file_checks=true; " + + "memtable_prefix_bloom_size_ratio=7.500000; max_sequential_skip_in_iterations=8; report_bg_io_stats=true; " + + "compaction_pri=kMinOverlappingRatio; compaction_style=kCompactionStyleLevel; memtable_factory=SkipListFactory; " + + "comparator=leveldb.BytewiseComparator; bloom_locality=0; compaction_filter_factory=nullptr; " + + "min_write_buffer_number_to_merge=1; max_write_buffer_number_to_maintain=0; compaction_filter=nullptr; merge_operator=nullptr; " + + "num_levels=7; optimize_filters_for_hits=false; force_consistency_checks=true; table_factory=BlockBasedTable; " + + "max_write_buffer_size_to_maintain=0; memtable_insert_with_hint_prefix_extractor=nullptr; level_compaction_dynamic_level_bytes=false; " + + "inplace_update_support=false; experimental_mempurge_threshold=0.003"; + + MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder cf = + MutableColumnFamilyOptions.parse(optionsString, true); + + // Check the values from the parsed string which are column family options + assertThat(cf.blobGarbageCollectionAgeCutoff()).isEqualTo(0.25); + assertThat(cf.blobGarbageCollectionForceThreshold()).isEqualTo(0.80); + 
assertThat(cf.arenaBlockSize()).isEqualTo(1048576); + assertThat(cf.enableBlobGarbageCollection()).isEqualTo(false); + assertThat(cf.level0StopWritesTrigger()).isEqualTo(36); + assertThat(cf.minBlobSize()).isEqualTo(65536); + assertThat(cf.blobCompactionReadaheadSize()).isEqualTo(262144); + assertThat(cf.blobFileStartingLevel()).isEqualTo(5); + assertThat(cf.prepopulateBlobCache()).isEqualTo(PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE); + assertThat(cf.targetFileSizeBase()).isEqualTo(67108864); + assertThat(cf.maxBytesForLevelBase()).isEqualTo(268435456); + assertThat(cf.softPendingCompactionBytesLimit()).isEqualTo(68719476736L); + assertThat(cf.blobCompressionType()).isEqualTo(CompressionType.NO_COMPRESSION); + assertThat(cf.maxWriteBufferNumber()).isEqualTo(2); + assertThat(cf.ttl()).isEqualTo(2592000); + assertThat(cf.maxSuccessiveMerges()).isEqualTo(0); + assertThat(cf.inplaceUpdateNumLocks()).isEqualTo(10000); + assertThat(cf.targetFileSizeMultiplier()).isEqualTo(1); + assertThat(cf.maxBytesForLevelMultiplierAdditional()) + .isEqualTo(new int[] {5, 7, 9, 11, 13, 15, 17}); + assertThat(cf.enableBlobFiles()).isEqualTo(true); + assertThat(cf.level0SlowdownWritesTrigger()).isEqualTo(20); + assertThat(cf.compressionType()).isEqualTo(CompressionType.LZ4HC_COMPRESSION); + assertThat(cf.level0FileNumCompactionTrigger()).isEqualTo(4); + assertThat(cf.blobFileSize()).isEqualTo(268435456); + assertThat(cf.maxBytesForLevelMultiplier()).isEqualTo(10.0); + assertThat(cf.writeBufferSize()).isEqualTo(67108864); + assertThat(cf.disableAutoCompactions()).isEqualTo(false); + assertThat(cf.maxCompactionBytes()).isEqualTo(1677721600); + assertThat(cf.memtableHugePageSize()).isEqualTo(0); + assertThat(cf.hardPendingCompactionBytesLimit()).isEqualTo(274877906944L); + assertThat(cf.periodicCompactionSeconds()).isEqualTo(0); + assertThat(cf.paranoidFileChecks()).isEqualTo(true); + assertThat(cf.memtablePrefixBloomSizeRatio()).isEqualTo(7.5); + assertThat(cf.experimentalMempurgeThreshold()).isEqualTo(0.003); + assertThat(cf.maxSequentialSkipInIterations()).isEqualTo(8); + assertThat(cf.reportBgIoStats()).isEqualTo(true); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java new file mode 100644 index 000000000..063a8de38 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/MutableDBOptionsTest.java @@ -0,0 +1,85 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+package org.rocksdb; + +import org.junit.Test; +import org.rocksdb.MutableDBOptions.MutableDBOptionsBuilder; + +import java.util.NoSuchElementException; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MutableDBOptionsTest { + + @Test + public void builder() { + final MutableDBOptionsBuilder builder = + MutableDBOptions.builder(); + builder + .setBytesPerSync(1024 * 1024 * 7) + .setMaxBackgroundJobs(5) + .setAvoidFlushDuringShutdown(false); + + assertThat(builder.bytesPerSync()).isEqualTo(1024 * 1024 * 7); + assertThat(builder.maxBackgroundJobs()).isEqualTo(5); + assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(false); + } + + @Test(expected = NoSuchElementException.class) + public void builder_getWhenNotSet() { + final MutableDBOptionsBuilder builder = + MutableDBOptions.builder(); + + builder.bytesPerSync(); + } + + @Test + public void builder_build() { + final MutableDBOptions options = MutableDBOptions + .builder() + .setBytesPerSync(1024 * 1024 * 7) + .setMaxBackgroundJobs(5) + .build(); + + assertThat(options.getKeys().length).isEqualTo(2); + assertThat(options.getValues().length).isEqualTo(2); + assertThat(options.getKeys()[0]) + .isEqualTo( + MutableDBOptions.DBOption.bytes_per_sync.name()); + assertThat(options.getValues()[0]).isEqualTo("7340032"); + assertThat(options.getKeys()[1]) + .isEqualTo( + MutableDBOptions.DBOption.max_background_jobs.name()); + assertThat(options.getValues()[1]).isEqualTo("5"); + } + + @Test + public void mutableDBOptions_toString() { + final String str = MutableDBOptions + .builder() + .setMaxOpenFiles(99) + .setDelayedWriteRate(789) + .setAvoidFlushDuringShutdown(true) + .setStrictBytesPerSync(true) + .build() + .toString(); + + assertThat(str).isEqualTo("max_open_files=99;delayed_write_rate=789;" + + "avoid_flush_during_shutdown=true;strict_bytes_per_sync=true"); + } + + @Test + public void mutableDBOptions_parse() { + final String str = "max_open_files=99;delayed_write_rate=789;" + + "avoid_flush_during_shutdown=true"; + + final MutableDBOptionsBuilder builder = + MutableDBOptions.parse(str); + + assertThat(builder.maxOpenFiles()).isEqualTo(99); + assertThat(builder.delayedWriteRate()).isEqualTo(789); + assertThat(builder.avoidFlushDuringShutdown()).isEqualTo(true); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java new file mode 100644 index 000000000..6db940619 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/MutableOptionsGetSetTest.java @@ -0,0 +1,429 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class MutableOptionsGetSetTest { + final int minBlobSize = 65536; + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + /** + * Validate the round-trip of blob options into and out of the C++ core of RocksDB + * From CF options on CF Creation to {RocksDB#getOptions} + * Uses 2x column families with different values for their options. 
+ * NOTE that some constraints are applied to the options in the C++ core, + * e.g. on {ColumnFamilyOptions#setMemtablePrefixBloomSizeRatio} + * + * @throws RocksDBException if the database throws an exception + */ + @Test + public void testGetMutableBlobOptionsAfterCreate() throws RocksDBException { + final ColumnFamilyOptions columnFamilyOptions0 = new ColumnFamilyOptions(); + final ColumnFamilyDescriptor columnFamilyDescriptor0 = + new ColumnFamilyDescriptor("default".getBytes(UTF_8), columnFamilyOptions0); + final List columnFamilyDescriptors = + Collections.singletonList(columnFamilyDescriptor0); + final List columnFamilyHandles = new ArrayList<>(); + + try (final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles)) { + try (final ColumnFamilyOptions columnFamilyOptions1 = + new ColumnFamilyOptions() + .setMinBlobSize(minBlobSize) + .setEnableBlobFiles(true) + .setBlobGarbageCollectionAgeCutoff(0.25) + .setBlobGarbageCollectionForceThreshold(0.80) + .setBlobCompactionReadaheadSize(262144) + .setBlobFileStartingLevel(2) + .setArenaBlockSize(42) + .setMemtablePrefixBloomSizeRatio(0.17) + .setExperimentalMempurgeThreshold(0.005) + .setMemtableWholeKeyFiltering(false) + .setMemtableHugePageSize(3) + .setMaxSuccessiveMerges(4) + .setMaxWriteBufferNumber(12) + .setInplaceUpdateNumLocks(16) + .setDisableAutoCompactions(false) + .setSoftPendingCompactionBytesLimit(112) + .setHardPendingCompactionBytesLimit(280) + .setLevel0FileNumCompactionTrigger(200) + .setLevel0SlowdownWritesTrigger(312) + .setLevel0StopWritesTrigger(584) + .setMaxCompactionBytes(12) + .setTargetFileSizeBase(99) + .setTargetFileSizeMultiplier(112) + .setMaxSequentialSkipInIterations(50) + .setReportBgIoStats(true); + + final ColumnFamilyOptions columnFamilyOptions2 = + new ColumnFamilyOptions() + .setMinBlobSize(minBlobSize) + .setEnableBlobFiles(false) + .setArenaBlockSize(42) + .setMemtablePrefixBloomSizeRatio(0.236) + .setExperimentalMempurgeThreshold(0.247) + .setMemtableWholeKeyFiltering(true) + .setMemtableHugePageSize(8) + .setMaxSuccessiveMerges(12) + .setMaxWriteBufferNumber(22) + .setInplaceUpdateNumLocks(160) + .setDisableAutoCompactions(true) + .setSoftPendingCompactionBytesLimit(1124) + .setHardPendingCompactionBytesLimit(2800) + .setLevel0FileNumCompactionTrigger(2000) + .setLevel0SlowdownWritesTrigger(5840) + .setLevel0StopWritesTrigger(31200) + .setMaxCompactionBytes(112) + .setTargetFileSizeBase(999) + .setTargetFileSizeMultiplier(1120) + .setMaxSequentialSkipInIterations(24) + .setReportBgIoStats(true)) { + final ColumnFamilyDescriptor columnFamilyDescriptor1 = + new ColumnFamilyDescriptor("column_family_1".getBytes(UTF_8), columnFamilyOptions1); + final ColumnFamilyDescriptor columnFamilyDescriptor2 = + new ColumnFamilyDescriptor("column_family_2".getBytes(UTF_8), columnFamilyOptions2); + + // Create the column family with blob options + final ColumnFamilyHandle columnFamilyHandle1 = + db.createColumnFamily(columnFamilyDescriptor1); + final ColumnFamilyHandle columnFamilyHandle2 = + db.createColumnFamily(columnFamilyDescriptor2); + + // Check the getOptions() brings back the creation options for CF1 + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder1 = + db.getOptions(columnFamilyHandle1); + assertThat(builder1.enableBlobFiles()).isEqualTo(true); + assertThat(builder1.blobGarbageCollectionAgeCutoff()).isEqualTo(0.25); + 
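+        // Editor's note: getOptions() materialises only the *mutable* subset of
+        // the column family options into a builder; immutable settings such as
+        // the comparator or table factory are not part of this round-trip, so
+        // the assertions here and below cover mutable options only.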
assertThat(builder1.blobGarbageCollectionForceThreshold()).isEqualTo(0.80); + assertThat(builder1.blobCompactionReadaheadSize()).isEqualTo(262144); + assertThat(builder1.blobFileStartingLevel()).isEqualTo(2); + assertThat(builder1.minBlobSize()).isEqualTo(minBlobSize); + assertThat(builder1.arenaBlockSize()).isEqualTo(42); + assertThat(builder1.memtablePrefixBloomSizeRatio()).isEqualTo(0.17); + assertThat(builder1.experimentalMempurgeThreshold()).isEqualTo(0.005); + assertThat(builder1.memtableWholeKeyFiltering()).isEqualTo(false); + assertThat(builder1.memtableHugePageSize()).isEqualTo(3); + assertThat(builder1.maxSuccessiveMerges()).isEqualTo(4); + assertThat(builder1.maxWriteBufferNumber()).isEqualTo(12); + assertThat(builder1.inplaceUpdateNumLocks()).isEqualTo(16); + assertThat(builder1.disableAutoCompactions()).isEqualTo(false); + assertThat(builder1.softPendingCompactionBytesLimit()).isEqualTo(112); + assertThat(builder1.hardPendingCompactionBytesLimit()).isEqualTo(280); + assertThat(builder1.level0FileNumCompactionTrigger()).isEqualTo(200); + assertThat(builder1.level0SlowdownWritesTrigger()).isEqualTo(312); + assertThat(builder1.level0StopWritesTrigger()).isEqualTo(584); + assertThat(builder1.maxCompactionBytes()).isEqualTo(12); + assertThat(builder1.targetFileSizeBase()).isEqualTo(99); + assertThat(builder1.targetFileSizeMultiplier()).isEqualTo(112); + assertThat(builder1.maxSequentialSkipInIterations()).isEqualTo(50); + assertThat(builder1.reportBgIoStats()).isEqualTo(true); + + // Check the getOptions() brings back the creation options for CF2 + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder2 = + db.getOptions(columnFamilyHandle2); + assertThat(builder2.enableBlobFiles()).isEqualTo(false); + assertThat(builder2.minBlobSize()).isEqualTo(minBlobSize); + assertThat(builder2.arenaBlockSize()).isEqualTo(42); + assertThat(builder2.memtablePrefixBloomSizeRatio()).isEqualTo(0.236); + assertThat(builder2.experimentalMempurgeThreshold()).isEqualTo(0.247); + assertThat(builder2.memtableWholeKeyFiltering()).isEqualTo(true); + assertThat(builder2.memtableHugePageSize()).isEqualTo(8); + assertThat(builder2.maxSuccessiveMerges()).isEqualTo(12); + assertThat(builder2.maxWriteBufferNumber()).isEqualTo(22); + assertThat(builder2.inplaceUpdateNumLocks()).isEqualTo(160); + assertThat(builder2.disableAutoCompactions()).isEqualTo(true); + assertThat(builder2.softPendingCompactionBytesLimit()).isEqualTo(1124); + assertThat(builder2.hardPendingCompactionBytesLimit()).isEqualTo(2800); + assertThat(builder2.level0FileNumCompactionTrigger()).isEqualTo(2000); + assertThat(builder2.level0SlowdownWritesTrigger()).isEqualTo(5840); + assertThat(builder2.level0StopWritesTrigger()).isEqualTo(31200); + assertThat(builder2.maxCompactionBytes()).isEqualTo(112); + assertThat(builder2.targetFileSizeBase()).isEqualTo(999); + assertThat(builder2.targetFileSizeMultiplier()).isEqualTo(1120); + assertThat(builder2.maxSequentialSkipInIterations()).isEqualTo(24); + assertThat(builder2.reportBgIoStats()).isEqualTo(true); + } + } + } + + /** + * Validate the round-trip of blob options into and out of the C++ core of RocksDB + * From {RocksDB#setOptions} to {RocksDB#getOptions} + * Uses 2x column families with different values for their options. + * NOTE that some constraints are applied to the options in the C++ core, + * e.g. 
on {ColumnFamilyOptions#setMemtablePrefixBloomSizeRatio} + * + * @throws RocksDBException if a database access has an error + */ + @Test + public void testGetMutableBlobOptionsAfterSetCF() throws RocksDBException { + final ColumnFamilyOptions columnFamilyOptions0 = new ColumnFamilyOptions(); + final ColumnFamilyDescriptor columnFamilyDescriptor0 = + new ColumnFamilyDescriptor("default".getBytes(UTF_8), columnFamilyOptions0); + final List columnFamilyDescriptors = + Collections.singletonList(columnFamilyDescriptor0); + final List columnFamilyHandles = new ArrayList<>(); + + try (final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles)) { + try (final ColumnFamilyOptions columnFamilyOptions1 = new ColumnFamilyOptions(); + + final ColumnFamilyOptions columnFamilyOptions2 = new ColumnFamilyOptions()) { + final ColumnFamilyDescriptor columnFamilyDescriptor1 = + new ColumnFamilyDescriptor("column_family_1".getBytes(UTF_8), columnFamilyOptions1); + final ColumnFamilyDescriptor columnFamilyDescriptor2 = + new ColumnFamilyDescriptor("column_family_2".getBytes(UTF_8), columnFamilyOptions2); + + // Create the column family with blob options + final ColumnFamilyHandle columnFamilyHandle1 = + db.createColumnFamily(columnFamilyDescriptor1); + final ColumnFamilyHandle columnFamilyHandle2 = + db.createColumnFamily(columnFamilyDescriptor2); + db.flush(new FlushOptions().setWaitForFlush(true)); + + final MutableColumnFamilyOptions + .MutableColumnFamilyOptionsBuilder mutableColumnFamilyOptions1 = + MutableColumnFamilyOptions.builder() + .setMinBlobSize(minBlobSize) + .setEnableBlobFiles(true) + .setBlobGarbageCollectionAgeCutoff(0.25) + .setBlobGarbageCollectionForceThreshold(0.80) + .setBlobCompactionReadaheadSize(262144) + .setBlobFileStartingLevel(3) + .setArenaBlockSize(42) + .setMemtablePrefixBloomSizeRatio(0.17) + .setExperimentalMempurgeThreshold(0.005) + .setMemtableWholeKeyFiltering(false) + .setMemtableHugePageSize(3) + .setMaxSuccessiveMerges(4) + .setMaxWriteBufferNumber(12) + .setInplaceUpdateNumLocks(16) + .setDisableAutoCompactions(false) + .setSoftPendingCompactionBytesLimit(112) + .setHardPendingCompactionBytesLimit(280) + .setLevel0FileNumCompactionTrigger(200) + .setLevel0SlowdownWritesTrigger(312) + .setLevel0StopWritesTrigger(584) + .setMaxCompactionBytes(12) + .setTargetFileSizeBase(99) + .setTargetFileSizeMultiplier(112); + db.setOptions(columnFamilyHandle1, mutableColumnFamilyOptions1.build()); + + // Check the getOptions() brings back the creation options for CF1 + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder1 = + db.getOptions(columnFamilyHandle1); + assertThat(builder1.enableBlobFiles()).isEqualTo(true); + assertThat(builder1.blobGarbageCollectionAgeCutoff()).isEqualTo(0.25); + assertThat(builder1.blobGarbageCollectionForceThreshold()).isEqualTo(0.80); + assertThat(builder1.blobCompactionReadaheadSize()).isEqualTo(262144); + assertThat(builder1.blobFileStartingLevel()).isEqualTo(3); + assertThat(builder1.minBlobSize()).isEqualTo(minBlobSize); + assertThat(builder1.arenaBlockSize()).isEqualTo(42); + assertThat(builder1.memtablePrefixBloomSizeRatio()).isEqualTo(0.17); + assertThat(builder1.experimentalMempurgeThreshold()).isEqualTo(0.005); + assertThat(builder1.memtableWholeKeyFiltering()).isEqualTo(false); + assertThat(builder1.memtableHugePageSize()).isEqualTo(3); + 
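+        // Editor's note (illustrative sketch, not part of the upstream test):
+        // setOptions() applies only the keys present in the built object, so a
+        // sparse update leaves every other option untouched; re-setting the
+        // same arena_block_size and re-reading demonstrates this:
+        db.setOptions(columnFamilyHandle1,
+            MutableColumnFamilyOptions.builder().setArenaBlockSize(42).build());
+        assertThat(db.getOptions(columnFamilyHandle1).maxWriteBufferNumber()).isEqualTo(12);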
assertThat(builder1.maxSuccessiveMerges()).isEqualTo(4); + assertThat(builder1.maxWriteBufferNumber()).isEqualTo(12); + assertThat(builder1.inplaceUpdateNumLocks()).isEqualTo(16); + assertThat(builder1.disableAutoCompactions()).isEqualTo(false); + assertThat(builder1.softPendingCompactionBytesLimit()).isEqualTo(112); + assertThat(builder1.hardPendingCompactionBytesLimit()).isEqualTo(280); + assertThat(builder1.level0FileNumCompactionTrigger()).isEqualTo(200); + assertThat(builder1.level0SlowdownWritesTrigger()).isEqualTo(312); + assertThat(builder1.level0StopWritesTrigger()).isEqualTo(584); + assertThat(builder1.maxCompactionBytes()).isEqualTo(12); + assertThat(builder1.targetFileSizeBase()).isEqualTo(99); + assertThat(builder1.targetFileSizeMultiplier()).isEqualTo(112); + + final MutableColumnFamilyOptions + .MutableColumnFamilyOptionsBuilder mutableColumnFamilyOptions2 = + MutableColumnFamilyOptions.builder() + .setMinBlobSize(minBlobSize) + .setEnableBlobFiles(false) + .setArenaBlockSize(42) + .setMemtablePrefixBloomSizeRatio(0.236) + .setExperimentalMempurgeThreshold(0.247) + .setMemtableWholeKeyFiltering(true) + .setMemtableHugePageSize(8) + .setMaxSuccessiveMerges(12) + .setMaxWriteBufferNumber(22) + .setInplaceUpdateNumLocks(160) + .setDisableAutoCompactions(true) + .setSoftPendingCompactionBytesLimit(1124) + .setHardPendingCompactionBytesLimit(2800) + .setLevel0FileNumCompactionTrigger(2000) + .setLevel0SlowdownWritesTrigger(5840) + .setLevel0StopWritesTrigger(31200) + .setMaxCompactionBytes(112) + .setTargetFileSizeBase(999) + .setTargetFileSizeMultiplier(1120); + db.setOptions(columnFamilyHandle2, mutableColumnFamilyOptions2.build()); + + // Check the getOptions() brings back the creation options for CF2 + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder2 = + db.getOptions(columnFamilyHandle2); + assertThat(builder2.enableBlobFiles()).isEqualTo(false); + assertThat(builder2.minBlobSize()).isEqualTo(minBlobSize); + assertThat(builder2.arenaBlockSize()).isEqualTo(42); + assertThat(builder2.memtablePrefixBloomSizeRatio()).isEqualTo(0.236); + assertThat(builder2.experimentalMempurgeThreshold()).isEqualTo(0.247); + assertThat(builder2.memtableWholeKeyFiltering()).isEqualTo(true); + assertThat(builder2.memtableHugePageSize()).isEqualTo(8); + assertThat(builder2.maxSuccessiveMerges()).isEqualTo(12); + assertThat(builder2.maxWriteBufferNumber()).isEqualTo(22); + assertThat(builder2.inplaceUpdateNumLocks()).isEqualTo(160); + assertThat(builder2.disableAutoCompactions()).isEqualTo(true); + assertThat(builder2.softPendingCompactionBytesLimit()).isEqualTo(1124); + assertThat(builder2.hardPendingCompactionBytesLimit()).isEqualTo(2800); + assertThat(builder2.level0FileNumCompactionTrigger()).isEqualTo(2000); + assertThat(builder2.level0SlowdownWritesTrigger()).isEqualTo(5840); + assertThat(builder2.level0StopWritesTrigger()).isEqualTo(31200); + assertThat(builder2.maxCompactionBytes()).isEqualTo(112); + assertThat(builder2.targetFileSizeBase()).isEqualTo(999); + assertThat(builder2.targetFileSizeMultiplier()).isEqualTo(1120); + } + } + } + + /** + * Validate the round-trip of blob options into and out of the C++ core of RocksDB + * From {RocksDB#setOptions} to {RocksDB#getOptions} + * Uses 2x column families with different values for their options. + * NOTE that some constraints are applied to the options in the C++ core, + * e.g. 
on {ColumnFamilyOptions#setMemtablePrefixBloomSizeRatio} + * + * @throws RocksDBException if a database access has an error + */ + @Test + public void testGetMutableBlobOptionsAfterSet() throws RocksDBException { + final ColumnFamilyOptions columnFamilyOptions0 = new ColumnFamilyOptions(); + final ColumnFamilyDescriptor columnFamilyDescriptor0 = + new ColumnFamilyDescriptor("default".getBytes(UTF_8), columnFamilyOptions0); + final List columnFamilyDescriptors = + Collections.singletonList(columnFamilyDescriptor0); + final List columnFamilyHandles = new ArrayList<>(); + + try (final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles)) { + final MutableColumnFamilyOptions + .MutableColumnFamilyOptionsBuilder mutableColumnFamilyOptions = + MutableColumnFamilyOptions.builder() + .setMinBlobSize(minBlobSize) + .setEnableBlobFiles(true) + .setBlobGarbageCollectionAgeCutoff(0.25) + .setBlobGarbageCollectionForceThreshold(0.80) + .setBlobCompactionReadaheadSize(131072) + .setBlobFileStartingLevel(4) + .setArenaBlockSize(42) + .setMemtablePrefixBloomSizeRatio(0.17) + .setExperimentalMempurgeThreshold(0.005) + .setMemtableWholeKeyFiltering(false) + .setMemtableHugePageSize(3) + .setMaxSuccessiveMerges(4) + .setMaxWriteBufferNumber(12) + .setInplaceUpdateNumLocks(16) + .setDisableAutoCompactions(false) + .setSoftPendingCompactionBytesLimit(112) + .setHardPendingCompactionBytesLimit(280) + .setLevel0FileNumCompactionTrigger(200) + .setLevel0SlowdownWritesTrigger(312) + .setLevel0StopWritesTrigger(584) + .setMaxCompactionBytes(12) + .setTargetFileSizeBase(99) + .setTargetFileSizeMultiplier(112); + db.setOptions(mutableColumnFamilyOptions.build()); + + // Check the getOptions() brings back the creation options for CF1 + final MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder builder1 = db.getOptions(); + assertThat(builder1.enableBlobFiles()).isEqualTo(true); + assertThat(builder1.blobGarbageCollectionAgeCutoff()).isEqualTo(0.25); + assertThat(builder1.blobGarbageCollectionForceThreshold()).isEqualTo(0.80); + assertThat(builder1.blobCompactionReadaheadSize()).isEqualTo(131072); + assertThat(builder1.blobFileStartingLevel()).isEqualTo(4); + assertThat(builder1.minBlobSize()).isEqualTo(minBlobSize); + assertThat(builder1.arenaBlockSize()).isEqualTo(42); + assertThat(builder1.memtablePrefixBloomSizeRatio()).isEqualTo(0.17); + assertThat(builder1.experimentalMempurgeThreshold()).isEqualTo(0.005); + assertThat(builder1.memtableWholeKeyFiltering()).isEqualTo(false); + assertThat(builder1.memtableHugePageSize()).isEqualTo(3); + assertThat(builder1.maxSuccessiveMerges()).isEqualTo(4); + assertThat(builder1.maxWriteBufferNumber()).isEqualTo(12); + assertThat(builder1.inplaceUpdateNumLocks()).isEqualTo(16); + assertThat(builder1.disableAutoCompactions()).isEqualTo(false); + assertThat(builder1.softPendingCompactionBytesLimit()).isEqualTo(112); + assertThat(builder1.hardPendingCompactionBytesLimit()).isEqualTo(280); + assertThat(builder1.level0FileNumCompactionTrigger()).isEqualTo(200); + assertThat(builder1.level0SlowdownWritesTrigger()).isEqualTo(312); + assertThat(builder1.level0StopWritesTrigger()).isEqualTo(584); + assertThat(builder1.maxCompactionBytes()).isEqualTo(12); + assertThat(builder1.targetFileSizeBase()).isEqualTo(99); + assertThat(builder1.targetFileSizeMultiplier()).isEqualTo(112); + } + } + + @Test + public void testGetMutableDBOptionsAfterSet() 
throws RocksDBException { + final ColumnFamilyOptions columnFamilyOptions0 = new ColumnFamilyOptions(); + final ColumnFamilyDescriptor columnFamilyDescriptor0 = + new ColumnFamilyDescriptor("default".getBytes(UTF_8), columnFamilyOptions0); + final List columnFamilyDescriptors = + Collections.singletonList(columnFamilyDescriptor0); + final List columnFamilyHandles = new ArrayList<>(); + + try (final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(dbOptions, dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles)) { + final MutableDBOptions.MutableDBOptionsBuilder mutableDBOptions = + MutableDBOptions.builder() + .setMaxBackgroundJobs(16) + .setAvoidFlushDuringShutdown(true) + .setWritableFileMaxBufferSize(2097152) + .setDelayedWriteRate(67108864) + .setMaxTotalWalSize(16777216) + .setDeleteObsoleteFilesPeriodMicros(86400000000L) + .setStatsDumpPeriodSec(1200) + .setStatsPersistPeriodSec(7200) + .setStatsHistoryBufferSize(6291456) + .setMaxOpenFiles(8) + .setBytesPerSync(4194304) + .setWalBytesPerSync(1048576) + .setStrictBytesPerSync(true) + .setCompactionReadaheadSize(1024); + + db.setDBOptions(mutableDBOptions.build()); + + final MutableDBOptions.MutableDBOptionsBuilder getBuilder = db.getDBOptions(); + assertThat(getBuilder.maxBackgroundJobs()).isEqualTo(16); // 4 + assertThat(getBuilder.avoidFlushDuringShutdown()).isEqualTo(true); // false + assertThat(getBuilder.writableFileMaxBufferSize()).isEqualTo(2097152); // 1048576 + assertThat(getBuilder.delayedWriteRate()).isEqualTo(67108864); // 16777216 + assertThat(getBuilder.maxTotalWalSize()).isEqualTo(16777216); + assertThat(getBuilder.deleteObsoleteFilesPeriodMicros()) + .isEqualTo(86400000000L); // 21600000000 + assertThat(getBuilder.statsDumpPeriodSec()).isEqualTo(1200); // 600 + assertThat(getBuilder.statsPersistPeriodSec()).isEqualTo(7200); // 600 + assertThat(getBuilder.statsHistoryBufferSize()).isEqualTo(6291456); // 1048576 + assertThat(getBuilder.maxOpenFiles()).isEqualTo(8); //-1 + assertThat(getBuilder.bytesPerSync()).isEqualTo(4194304); // 1048576 + assertThat(getBuilder.walBytesPerSync()).isEqualTo(1048576); // 0 + assertThat(getBuilder.strictBytesPerSync()).isEqualTo(true); // false + assertThat(getBuilder.compactionReadaheadSize()).isEqualTo(1024); // 0 + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java new file mode 100644 index 000000000..970e58c0c --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/NativeComparatorWrapperTest.java @@ -0,0 +1,95 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+
+package org.rocksdb;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.*;
+
+import static org.junit.Assert.assertEquals;
+
+public class NativeComparatorWrapperTest {
+  static {
+    RocksDB.loadLibrary();
+  }
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  private static final Random random = new Random();
+
+  @Test
+  public void roundtrip() throws RocksDBException {
+    final String dbPath = dbFolder.getRoot().getAbsolutePath();
+    final int ITERATIONS = 1_000;
+
+    final String[] storedKeys = new String[ITERATIONS];
+    try (final NativeStringComparatorWrapper comparator = new NativeStringComparatorWrapper();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setComparator(comparator)) {
+
+      // store random string keys
+      try (final RocksDB db = RocksDB.open(opt, dbPath)) {
+        for (int i = 0; i < ITERATIONS; i++) {
+          final String strKey = randomString();
+          final byte[] key = strKey.getBytes();
+          // does key already exist (avoid duplicates)
+          if (i > 0 && db.get(key) != null) {
+            i--; // generate a different key
+          } else {
+            db.put(key, "value".getBytes());
+            storedKeys[i] = strKey;
+          }
+        }
+      }
+
+      // sort the stored keys into ascending alpha-numeric order
+      Arrays.sort(storedKeys, new Comparator<String>() {
+        @Override
+        public int compare(final String o1, final String o2) {
+          return o1.compareTo(o2);
+        }
+      });
+
+      // re-open the db and read from start to end;
+      // the string keys should come back in ascending order
+      try (final RocksDB db = RocksDB.open(opt, dbPath);
+           final RocksIterator it = db.newIterator()) {
+        int count = 0;
+        for (it.seekToFirst(); it.isValid(); it.next()) {
+          final String strKey = new String(it.key());
+          assertEquals(storedKeys[count++], strKey);
+        }
+      }
+    }
+  }
+
+  private String randomString() {
+    final char[] chars = new char[12];
+    for (int i = 0; i < 12; i++) {
+      final int letterCode = random.nextInt(24);
+      final char letter = (char) (((int) 'a') + letterCode);
+      chars[i] = letter;
+    }
+    return String.copyValueOf(chars);
+  }
+
+  public static class NativeStringComparatorWrapper
+      extends NativeComparatorWrapper {
+
+    @Override
+    protected long initializeNative(final long... nativeParameterHandles) {
+      return newStringComparator();
+    }
+
+    private native long newStringComparator();
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
new file mode 100644
index 000000000..ab60081a0
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java
@@ -0,0 +1,41 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.Environment; + +import java.io.File; +import java.io.IOException; +import java.nio.file.*; + +import static org.assertj.core.api.Assertions.assertThat; + +public class NativeLibraryLoaderTest { + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Test + public void tempFolder() throws IOException { + NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( + temporaryFolder.getRoot().getAbsolutePath()); + final Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(), + Environment.getJniLibraryFileName("rocksdb")); + assertThat(Files.exists(path)).isTrue(); + assertThat(Files.isReadable(path)).isTrue(); + } + + @Test + public void overridesExistingLibrary() throws IOException { + File first = NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( + temporaryFolder.getRoot().getAbsolutePath()); + NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( + temporaryFolder.getRoot().getAbsolutePath()); + assertThat(first.exists()).isTrue(); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java new file mode 100644 index 000000000..519b70b1d --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionDBTest.java @@ -0,0 +1,131 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class OptimisticTransactionDBTest { + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void open() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(otdb).isNotNull(); + } + } + + @Test + public void open_columnFamilies() throws RocksDBException { + try(final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions myCfOpts = new ColumnFamilyOptions()) { + + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("myCf".getBytes(), myCfOpts)); + + final List columnFamilyHandles = new ArrayList<>(); + + try (final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles)) { + try { + assertThat(otdb).isNotNull(); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void beginTransaction() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions()) { + + try(final 
Transaction txn = otdb.beginTransaction(writeOptions)) { + assertThat(txn).isNotNull(); + } + } + } + + @Test + public void beginTransaction_transactionOptions() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions(); + final OptimisticTransactionOptions optimisticTxnOptions = + new OptimisticTransactionOptions()) { + + try(final Transaction txn = otdb.beginTransaction(writeOptions, + optimisticTxnOptions)) { + assertThat(txn).isNotNull(); + } + } + } + + @Test + public void beginTransaction_withOld() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions()) { + + try(final Transaction txn = otdb.beginTransaction(writeOptions)) { + final Transaction txnReused = otdb.beginTransaction(writeOptions, txn); + assertThat(txnReused).isSameAs(txn); + } + } + } + + @Test + public void beginTransaction_withOld_transactionOptions() + throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open( + options, dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions(); + final OptimisticTransactionOptions optimisticTxnOptions = + new OptimisticTransactionOptions()) { + + try(final Transaction txn = otdb.beginTransaction(writeOptions)) { + final Transaction txnReused = otdb.beginTransaction(writeOptions, + optimisticTxnOptions, txn); + assertThat(txnReused).isSameAs(txn); + } + } + } + + @Test + public void baseDB() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final OptimisticTransactionDB otdb = OptimisticTransactionDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(otdb).isNotNull(); + final RocksDB db = otdb.getBaseDB(); + assertThat(db).isNotNull(); + assertThat(db.isOwningHandle()).isFalse(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java new file mode 100644 index 000000000..ef656b958 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionOptionsTest.java @@ -0,0 +1,38 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; +import org.rocksdb.util.BytewiseComparator; + +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class OptimisticTransactionOptionsTest { + + private static final Random rand = PlatformRandomHelper. 
+ getPlatformSpecificRandomFactory(); + + @Test + public void setSnapshot() { + try (final OptimisticTransactionOptions opt = new OptimisticTransactionOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setSetSnapshot(boolValue); + assertThat(opt.isSetSnapshot()).isEqualTo(boolValue); + } + } + + @Test + public void comparator() { + try (final OptimisticTransactionOptions opt = new OptimisticTransactionOptions(); + final ComparatorOptions copt = new ComparatorOptions() + .setUseDirectBuffer(true); + final AbstractComparator comparator = new BytewiseComparator(copt)) { + opt.setComparator(comparator); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java new file mode 100644 index 000000000..d2f92e1ff --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptimisticTransactionTest.java @@ -0,0 +1,446 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.*; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.Test; + +public class OptimisticTransactionTest extends AbstractTransactionTest { + @Test + public void prepare_commit() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.commit(); + } + + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v12); + txn.prepare(); + + failBecauseExceptionWasNotThrown(RocksDBException.class); + } catch (final RocksDBException e) { + assertThat(e.getMessage()) + .contains("Two phase commit not supported for optimistic transactions"); + } + } + } + + @Test + public void getForUpdate_cf_conflict() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, k1, v1); + assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + + // NOTE: txn2 updates k1, during txn3 + txn2.put(testCf, k1, v12); + assertThat(txn2.get(testCf, readOptions, k1)).isEqualTo(v12); + txn2.commit(); + + try { + txn3.commit(); // should cause an exception! 
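+            // Editor's note: optimistic transactions take no locks; the read
+            // set recorded by getForUpdate() is validated only at commit, so
+            // txn2's intervening write to key1 surfaces here as Status.Code.Busy.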
+          } catch(final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting transactions");
+    }
+  }
+
+  @Test
+  public void getForUpdate_conflict() throws RocksDBException {
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
+    final byte[] v12 = "value12".getBytes(UTF_8);
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(k1, v1);
+        assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+
+          // NOTE: txn2 updates k1, during txn3
+          txn2.put(k1, v12);
+          assertThat(txn2.get(readOptions, k1)).isEqualTo(v12);
+          txn2.commit();
+
+          try {
+            txn3.commit(); // should cause an exception!
+          } catch(final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting transactions");
+    }
+  }
+
+  @Deprecated
+  @Test
+  public void multiGetForUpdate_cf_conflict() throws RocksDBException {
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
+    final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+      final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+      final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(testCf, keys[0], values[0]);
+        txn.put(testCf, keys[1], values[1]);
+        assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.multiGetForUpdate(readOptions, cfList, keys))
+              .isEqualTo(values);
+
+          // NOTE: txn2 updates k1, during txn3
+          txn2.put(testCf, keys[0], otherValue);
+          assertThat(txn2.get(testCf, readOptions, keys[0]))
+              .isEqualTo(otherValue);
+          txn2.commit();
+
+          try {
+            txn3.commit(); // should cause an exception!
+          } catch (final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting transactions");
+    }
+  }
+
+  @Test
+  public void multiGetAsListForUpdate_cf_conflict() throws RocksDBException {
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
+    final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+    try (final DBContainer dbContainer = startDb();
+         final ReadOptions readOptions = new ReadOptions()) {
+      final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+      final List<ColumnFamilyHandle> cfList = Arrays.asList(testCf, testCf);
+
+      try (final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(testCf, keys[0], values[0]);
+        txn.put(testCf, keys[1], values[1]);
+        assertThat(txn.multiGetAsList(readOptions, cfList, Arrays.asList(keys)))
+            .containsExactly(values);
+        txn.commit();
+      }
+
+      try (final Transaction txn2 = dbContainer.beginTransaction()) {
+        try (final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.multiGetForUpdateAsList(readOptions, cfList, Arrays.asList(keys)))
+              .containsExactly(values);
+
+          // NOTE: txn2 updates k1, during txn3
+          txn2.put(testCf, keys[0], otherValue);
+          assertThat(txn2.get(testCf, readOptions, keys[0])).isEqualTo(otherValue);
+          txn2.commit();
+
+          try {
+            txn3.commit(); // should cause an exception!
+          } catch (final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting transactions");
+    }
+  }
+
+  @Deprecated
+  @Test
+  public void multiGetForUpdate_conflict() throws RocksDBException {
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
+    final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(keys[0], values[0]);
+        txn.put(keys[1], values[1]);
+        assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.multiGetForUpdate(readOptions, keys))
+              .isEqualTo(values);
+
+          // NOTE: txn2 updates k1, during txn3
+          txn2.put(keys[0], otherValue);
+          assertThat(txn2.get(readOptions, keys[0]))
+              .isEqualTo(otherValue);
+          txn2.commit();
+
+          try {
+            txn3.commit(); // should cause an exception!
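+            // Editor's note: multiGetForUpdate() added every key in the batch
+            // to txn3's read set, so a conflicting write to any one of them
+            // (key1 here) is enough to fail validation of the whole commit.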
+          } catch (final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting transactions");
+    }
+  }
+
+  @Test
+  public void multiGetAsListForUpdate_conflict() throws RocksDBException {
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
+    final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+    try (final DBContainer dbContainer = startDb();
+         final ReadOptions readOptions = new ReadOptions()) {
+      try (final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(keys[0], values[0]);
+        txn.put(keys[1], values[1]);
+        assertThat(txn.multiGetAsList(readOptions, Arrays.asList(keys))).containsExactly(values);
+        txn.commit();
+      }
+
+      try (final Transaction txn2 = dbContainer.beginTransaction()) {
+        try (final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.multiGetForUpdateAsList(readOptions, Arrays.asList(keys)))
+              .containsExactly(values);
+
+          // NOTE: txn2 updates k1, during txn3
+          txn2.put(keys[0], otherValue);
+          assertThat(txn2.get(readOptions, keys[0])).isEqualTo(otherValue);
+          txn2.commit();
+
+          try {
+            txn3.commit(); // should cause an exception!
+          } catch (final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.Busy);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting transactions");
+    }
+  }
+
+  @Test
+  public void undoGetForUpdate_cf_conflict() throws RocksDBException {
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
+    final byte[] v12 = "value12".getBytes(UTF_8);
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+      final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily();
+
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(testCf, k1, v1);
+        assertThat(txn.get(testCf, readOptions, k1)).isEqualTo(v1);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1);
+
+          // undo the getForUpdate
+          txn3.undoGetForUpdate(testCf, k1);
+
+          // NOTE: txn2 updates k1, during txn3
+          txn2.put(testCf, k1, v12);
+          assertThat(txn2.get(testCf, readOptions, k1)).isEqualTo(v12);
+          txn2.commit();
+
+          // should not cause an exception
+          // because we undid the getForUpdate above!
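+          // Editor's note: undoGetForUpdate() removed key1 from txn3's tracked
+          // read set, so commit-time validation no longer sees txn2's write.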
+          txn3.commit();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void undoGetForUpdate_conflict() throws RocksDBException {
+    final byte[] k1 = "key1".getBytes(UTF_8);
+    final byte[] v1 = "value1".getBytes(UTF_8);
+    final byte[] v12 = "value12".getBytes(UTF_8);
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(k1, v1);
+        assertThat(txn.get(readOptions, k1)).isEqualTo(v1);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1);
+
+          // undo the getForUpdate
+          txn3.undoGetForUpdate(k1);
+
+          // NOTE: txn2 updates k1, during txn3
+          txn2.put(k1, v12);
+          assertThat(txn2.get(readOptions, k1)).isEqualTo(v12);
+          txn2.commit();
+
+          // should not cause an exception
+          // because we undid the getForUpdate above!
+          txn3.commit();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void name() throws RocksDBException {
+    try(final DBContainer dbContainer = startDb();
+        final Transaction txn = dbContainer.beginTransaction()) {
+      assertThat(txn.getName()).isEmpty();
+      final String name = "my-transaction-" + rand.nextLong();
+
+      try {
+        txn.setName(name);
+        fail("Optimistic transactions cannot be named.");
+      } catch(final RocksDBException e) {
+        assertThat(e.getStatus().getCode()).isEqualTo(Status.Code.InvalidArgument);
+      }
+    }
+  }
+
+  @Override
+  public OptimisticTransactionDBContainer startDb()
+      throws RocksDBException {
+    final DBOptions options = new DBOptions()
+        .setCreateIfMissing(true)
+        .setCreateMissingColumnFamilies(true);
+
+    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+        Arrays.asList(
+            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor(TXN_TEST_COLUMN_FAMILY,
+                columnFamilyOptions));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+
+    final OptimisticTransactionDB optimisticTxnDb;
+    try {
+      optimisticTxnDb = OptimisticTransactionDB.open(
+          options, dbFolder.getRoot().getAbsolutePath(),
+          columnFamilyDescriptors, columnFamilyHandles);
+    } catch(final RocksDBException e) {
+      columnFamilyOptions.close();
+      options.close();
+      throw e;
+    }
+
+    final WriteOptions writeOptions = new WriteOptions();
+    final OptimisticTransactionOptions optimisticTxnOptions =
+        new OptimisticTransactionOptions();
+
+    return new OptimisticTransactionDBContainer(optimisticTxnOptions,
+        writeOptions, columnFamilyHandles, optimisticTxnDb, columnFamilyOptions,
+        options);
+  }
+
+  private static class OptimisticTransactionDBContainer
+      extends DBContainer {
+
+    private final OptimisticTransactionOptions optimisticTxnOptions;
+    private final OptimisticTransactionDB optimisticTxnDb;
+
+    public OptimisticTransactionDBContainer(
+        final OptimisticTransactionOptions optimisticTxnOptions,
+        final WriteOptions writeOptions,
+        final List<ColumnFamilyHandle> columnFamilyHandles,
+        final OptimisticTransactionDB optimisticTxnDb,
+        final ColumnFamilyOptions columnFamilyOptions,
+        final DBOptions options) {
+      super(writeOptions, columnFamilyHandles, columnFamilyOptions,
+          options);
+      this.optimisticTxnOptions = optimisticTxnOptions;
+      this.optimisticTxnDb = optimisticTxnDb;
+    }
+
+    @Override
+    public Transaction beginTransaction() {
+      return optimisticTxnDb.beginTransaction(writeOptions,
+          optimisticTxnOptions);
+    }
+
+    @Override
+    public Transaction beginTransaction(final WriteOptions
writeOptions) { + return optimisticTxnDb.beginTransaction(writeOptions, + optimisticTxnOptions); + } + + @Override + public void close() { + optimisticTxnOptions.close(); + writeOptions.close(); + for(final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + columnFamilyHandle.close(); + } + optimisticTxnDb.close(); + options.close(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java new file mode 100644 index 000000000..129f1c39a --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java @@ -0,0 +1,1492 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.*; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.*; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.ClassRule; +import org.junit.Test; +import org.rocksdb.test.RemoveEmptyValueCompactionFilterFactory; + +public class OptionsTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + public static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void copyConstructor() { + Options origOpts = new Options(); + origOpts.setNumLevels(rand.nextInt(8)); + origOpts.setTargetFileSizeMultiplier(rand.nextInt(100)); + origOpts.setLevel0StopWritesTrigger(rand.nextInt(50)); + Options copyOpts = new Options(origOpts); + assertThat(origOpts.numLevels()).isEqualTo(copyOpts.numLevels()); + assertThat(origOpts.targetFileSizeMultiplier()).isEqualTo(copyOpts.targetFileSizeMultiplier()); + assertThat(origOpts.level0StopWritesTrigger()).isEqualTo(copyOpts.level0StopWritesTrigger()); + } + + @Test + public void setIncreaseParallelism() { + try (final Options opt = new Options()) { + final int threads = Runtime.getRuntime().availableProcessors() * 2; + opt.setIncreaseParallelism(threads); + } + } + + @Test + public void writeBufferSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setWriteBufferSize(longValue); + assertThat(opt.writeBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void maxWriteBufferNumber() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxWriteBufferNumber(intValue); + assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue); + } + } + + @Test + public void minWriteBufferNumberToMerge() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMinWriteBufferNumberToMerge(intValue); + assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue); + } + } + + @Test + public void numLevels() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setNumLevels(intValue); + assertThat(opt.numLevels()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroFileNumCompactionTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroFileNumCompactionTrigger(intValue); + 
assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroSlowdownWritesTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroSlowdownWritesTrigger(intValue); + assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroStopWritesTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroStopWritesTrigger(intValue); + assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void targetFileSizeBase() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setTargetFileSizeBase(longValue); + assertThat(opt.targetFileSizeBase()).isEqualTo(longValue); + } + } + + @Test + public void targetFileSizeMultiplier() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setTargetFileSizeMultiplier(intValue); + assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue); + } + } + + @Test + public void maxBytesForLevelBase() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxBytesForLevelBase(longValue); + assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue); + } + } + + @Test + public void levelCompactionDynamicLevelBytes() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setLevelCompactionDynamicLevelBytes(boolValue); + assertThat(opt.levelCompactionDynamicLevelBytes()) + .isEqualTo(boolValue); + } + } + + @Test + public void maxBytesForLevelMultiplier() { + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); + opt.setMaxBytesForLevelMultiplier(doubleValue); + assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(doubleValue); + } + } + + @Test + public void maxBytesForLevelMultiplierAdditional() { + try (final Options opt = new Options()) { + final int intValue1 = rand.nextInt(); + final int intValue2 = rand.nextInt(); + final int[] ints = new int[]{intValue1, intValue2}; + opt.setMaxBytesForLevelMultiplierAdditional(ints); + assertThat(opt.maxBytesForLevelMultiplierAdditional()).isEqualTo(ints); + } + } + + @Test + public void maxCompactionBytes() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxCompactionBytes(longValue); + assertThat(opt.maxCompactionBytes()).isEqualTo(longValue); + } + } + + @Test + public void softPendingCompactionBytesLimit() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setSoftPendingCompactionBytesLimit(longValue); + assertThat(opt.softPendingCompactionBytesLimit()).isEqualTo(longValue); + } + } + + @Test + public void hardPendingCompactionBytesLimit() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setHardPendingCompactionBytesLimit(longValue); + assertThat(opt.hardPendingCompactionBytesLimit()).isEqualTo(longValue); + } + } + + @Test + public void level0FileNumCompactionTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevel0FileNumCompactionTrigger(intValue); + assertThat(opt.level0FileNumCompactionTrigger()).isEqualTo(intValue); + } + } + + @Test + public void level0SlowdownWritesTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + 
opt.setLevel0SlowdownWritesTrigger(intValue); + assertThat(opt.level0SlowdownWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void level0StopWritesTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevel0StopWritesTrigger(intValue); + assertThat(opt.level0StopWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void arenaBlockSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setArenaBlockSize(longValue); + assertThat(opt.arenaBlockSize()).isEqualTo(longValue); + } + } + + @Test + public void disableAutoCompactions() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDisableAutoCompactions(boolValue); + assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue); + } + } + + @Test + public void maxSequentialSkipInIterations() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxSequentialSkipInIterations(longValue); + assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue); + } + } + + @Test + public void inplaceUpdateSupport() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setInplaceUpdateSupport(boolValue); + assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue); + } + } + + @Test + public void inplaceUpdateNumLocks() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setInplaceUpdateNumLocks(longValue); + assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue); + } + } + + @Test + public void memtablePrefixBloomSizeRatio() { + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); + opt.setMemtablePrefixBloomSizeRatio(doubleValue); + assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue); + } + } + + @Test + public void experimentalMempurgeThreshold() { + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); + opt.setExperimentalMempurgeThreshold(doubleValue); + assertThat(opt.experimentalMempurgeThreshold()).isEqualTo(doubleValue); + } + } + + @Test + public void memtableWholeKeyFiltering() { + try (final Options opt = new Options()) { + final boolean booleanValue = rand.nextBoolean(); + opt.setMemtableWholeKeyFiltering(booleanValue); + assertThat(opt.memtableWholeKeyFiltering()).isEqualTo(booleanValue); + } + } + + @Test + public void memtableHugePageSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMemtableHugePageSize(longValue); + assertThat(opt.memtableHugePageSize()).isEqualTo(longValue); + } + } + + @Test + public void bloomLocality() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setBloomLocality(intValue); + assertThat(opt.bloomLocality()).isEqualTo(intValue); + } + } + + @Test + public void maxSuccessiveMerges() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxSuccessiveMerges(longValue); + assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue); + } + } + + @Test + public void optimizeFiltersForHits() { + try (final Options opt = new Options()) { + final boolean aBoolean = rand.nextBoolean(); + opt.setOptimizeFiltersForHits(aBoolean); + assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean); + } + } + + @Test + public 
void createIfMissing() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setCreateIfMissing(boolValue);
+      assertThat(opt.createIfMissing()).
+          isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void createMissingColumnFamilies() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setCreateMissingColumnFamilies(boolValue);
+      assertThat(opt.createMissingColumnFamilies()).
+          isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void errorIfExists() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setErrorIfExists(boolValue);
+      assertThat(opt.errorIfExists()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void paranoidChecks() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setParanoidChecks(boolValue);
+      assertThat(opt.paranoidChecks()).
+          isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void maxTotalWalSize() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setMaxTotalWalSize(longValue);
+      assertThat(opt.maxTotalWalSize()).
+          isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void maxOpenFiles() {
+    try (final Options opt = new Options()) {
+      final int intValue = rand.nextInt();
+      opt.setMaxOpenFiles(intValue);
+      assertThat(opt.maxOpenFiles()).isEqualTo(intValue);
+    }
+  }
+
+  @Test
+  public void maxFileOpeningThreads() {
+    try (final Options opt = new Options()) {
+      final int intValue = rand.nextInt();
+      opt.setMaxFileOpeningThreads(intValue);
+      assertThat(opt.maxFileOpeningThreads()).isEqualTo(intValue);
+    }
+  }
+
+  @Test
+  public void useFsync() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setUseFsync(boolValue);
+      assertThat(opt.useFsync()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void dbPaths() {
+    final List<DbPath> dbPaths = new ArrayList<>();
+    dbPaths.add(new DbPath(Paths.get("/a"), 10));
+    dbPaths.add(new DbPath(Paths.get("/b"), 100));
+    dbPaths.add(new DbPath(Paths.get("/c"), 1000));
+
+    try (final Options opt = new Options()) {
+      assertThat(opt.dbPaths()).isEqualTo(Collections.emptyList());
+
+      opt.setDbPaths(dbPaths);
+
+      assertThat(opt.dbPaths()).isEqualTo(dbPaths);
+    }
+  }
+
+  @Test
+  public void dbLogDir() {
+    try (final Options opt = new Options()) {
+      final String str = "path/to/DbLogDir";
+      opt.setDbLogDir(str);
+      assertThat(opt.dbLogDir()).isEqualTo(str);
+    }
+  }
+
+  @Test
+  public void walDir() {
+    try (final Options opt = new Options()) {
+      final String str = "path/to/WalDir";
+      opt.setWalDir(str);
+      assertThat(opt.walDir()).isEqualTo(str);
+    }
+  }
+
+  @Test
+  public void deleteObsoleteFilesPeriodMicros() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setDeleteObsoleteFilesPeriodMicros(longValue);
+      assertThat(opt.deleteObsoleteFilesPeriodMicros()).
+          isEqualTo(longValue);
+    }
+  }
+
+  @SuppressWarnings("deprecated")
+  @Test
+  public void maxBackgroundCompactions() {
+    try (final Options opt = new Options()) {
+      final int intValue = rand.nextInt();
+      opt.setMaxBackgroundCompactions(intValue);
+      assertThat(opt.maxBackgroundCompactions()).
+          isEqualTo(intValue);
+    }
+  }
+
+  @Test
+  public void maxSubcompactions() {
+    try (final Options opt = new Options()) {
+      final int intValue = rand.nextInt();
+      opt.setMaxSubcompactions(intValue);
+      assertThat(opt.maxSubcompactions()).
+ isEqualTo(intValue); + } + } + + @SuppressWarnings("deprecated") + @Test + public void maxBackgroundFlushes() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundFlushes(intValue); + assertThat(opt.maxBackgroundFlushes()). + isEqualTo(intValue); + } + } + + @Test + public void maxBackgroundJobs() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundJobs(intValue); + assertThat(opt.maxBackgroundJobs()).isEqualTo(intValue); + } + } + + @Test + public void maxLogFileSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxLogFileSize(longValue); + assertThat(opt.maxLogFileSize()).isEqualTo(longValue); + } + } + + @Test + public void logFileTimeToRoll() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setLogFileTimeToRoll(longValue); + assertThat(opt.logFileTimeToRoll()). + isEqualTo(longValue); + } + } + + @Test + public void keepLogFileNum() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setKeepLogFileNum(longValue); + assertThat(opt.keepLogFileNum()).isEqualTo(longValue); + } + } + + @Test + public void recycleLogFileNum() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setRecycleLogFileNum(longValue); + assertThat(opt.recycleLogFileNum()).isEqualTo(longValue); + } + } + + @Test + public void maxManifestFileSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxManifestFileSize(longValue); + assertThat(opt.maxManifestFileSize()). + isEqualTo(longValue); + } + } + + @Test + public void tableCacheNumshardbits() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setTableCacheNumshardbits(intValue); + assertThat(opt.tableCacheNumshardbits()). + isEqualTo(intValue); + } + } + + @Test + public void walSizeLimitMB() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setWalSizeLimitMB(longValue); + assertThat(opt.walSizeLimitMB()).isEqualTo(longValue); + } + } + + @Test + public void walTtlSeconds() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setWalTtlSeconds(longValue); + assertThat(opt.walTtlSeconds()).isEqualTo(longValue); + } + } + + @Test + public void manifestPreallocationSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setManifestPreallocationSize(longValue); + assertThat(opt.manifestPreallocationSize()). 
+ isEqualTo(longValue); + } + } + + @Test + public void useDirectReads() { + try(final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseDirectReads(boolValue); + assertThat(opt.useDirectReads()).isEqualTo(boolValue); + } + } + + @Test + public void useDirectIoForFlushAndCompaction() { + try(final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseDirectIoForFlushAndCompaction(boolValue); + assertThat(opt.useDirectIoForFlushAndCompaction()).isEqualTo(boolValue); + } + } + + @Test + public void allowFAllocate() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowFAllocate(boolValue); + assertThat(opt.allowFAllocate()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapReads() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapReads(boolValue); + assertThat(opt.allowMmapReads()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapWrites() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapWrites(boolValue); + assertThat(opt.allowMmapWrites()).isEqualTo(boolValue); + } + } + + @Test + public void isFdCloseOnExec() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setIsFdCloseOnExec(boolValue); + assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue); + } + } + + @Test + public void statsDumpPeriodSec() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setStatsDumpPeriodSec(intValue); + assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue); + } + } + + @Test + public void statsPersistPeriodSec() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setStatsPersistPeriodSec(intValue); + assertThat(opt.statsPersistPeriodSec()).isEqualTo(intValue); + } + } + + @Test + public void statsHistoryBufferSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setStatsHistoryBufferSize(longValue); + assertThat(opt.statsHistoryBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void adviseRandomOnOpen() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAdviseRandomOnOpen(boolValue); + assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue); + } + } + + @Test + public void dbWriteBufferSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setDbWriteBufferSize(longValue); + assertThat(opt.dbWriteBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void setWriteBufferManager() throws RocksDBException { + try (final Options opt = new Options(); + final Cache cache = new LRUCache(1 * 1024 * 1024); + final WriteBufferManager writeBufferManager = new WriteBufferManager(2000l, cache)) { + opt.setWriteBufferManager(writeBufferManager); + assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager); + } + } + + @Test + public void setWriteBufferManagerWithZeroBufferSize() throws RocksDBException { + try (final Options opt = new Options(); + final Cache cache = new LRUCache(1 * 1024 * 1024); + final WriteBufferManager writeBufferManager = new WriteBufferManager(0l, cache)) { + opt.setWriteBufferManager(writeBufferManager); + assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager); + } + } + + @Test + public void 
setWriteBufferManagerWithAllowStall() throws RocksDBException {
+    try (final Options opt = new Options(); final Cache cache = new LRUCache(1 * 1024 * 1024);
+         final WriteBufferManager writeBufferManager = new WriteBufferManager(2000l, cache, true)) {
+      opt.setWriteBufferManager(writeBufferManager);
+      assertThat(opt.writeBufferManager()).isEqualTo(writeBufferManager);
+      assertThat(opt.writeBufferManager().allowStall()).isEqualTo(true);
+    }
+  }
+
+  @Test
+  public void accessHintOnCompactionStart() {
+    try (final Options opt = new Options()) {
+      final AccessHint accessHint = AccessHint.SEQUENTIAL;
+      opt.setAccessHintOnCompactionStart(accessHint);
+      assertThat(opt.accessHintOnCompactionStart()).isEqualTo(accessHint);
+    }
+  }
+
+  @Test
+  public void compactionReadaheadSize() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setCompactionReadaheadSize(longValue);
+      assertThat(opt.compactionReadaheadSize()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void randomAccessMaxBufferSize() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setRandomAccessMaxBufferSize(longValue);
+      assertThat(opt.randomAccessMaxBufferSize()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void writableFileMaxBufferSize() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setWritableFileMaxBufferSize(longValue);
+      assertThat(opt.writableFileMaxBufferSize()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void useAdaptiveMutex() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setUseAdaptiveMutex(boolValue);
+      assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void bytesPerSync() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setBytesPerSync(longValue);
+      assertThat(opt.bytesPerSync()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void walBytesPerSync() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setWalBytesPerSync(longValue);
+      assertThat(opt.walBytesPerSync()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void strictBytesPerSync() {
+    try (final Options opt = new Options()) {
+      assertThat(opt.strictBytesPerSync()).isFalse();
+      opt.setStrictBytesPerSync(true);
+      assertThat(opt.strictBytesPerSync()).isTrue();
+    }
+  }
+
+  @Test
+  public void enableThreadTracking() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setEnableThreadTracking(boolValue);
+      assertThat(opt.enableThreadTracking()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void delayedWriteRate() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setDelayedWriteRate(longValue);
+      assertThat(opt.delayedWriteRate()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void enablePipelinedWrite() {
+    try(final Options opt = new Options()) {
+      assertThat(opt.enablePipelinedWrite()).isFalse();
+      opt.setEnablePipelinedWrite(true);
+      assertThat(opt.enablePipelinedWrite()).isTrue();
+    }
+  }
+
+  @Test
+  public void unorderedWrite() {
+    try(final Options opt = new Options()) {
+      assertThat(opt.unorderedWrite()).isFalse();
+      opt.setUnorderedWrite(true);
+      assertThat(opt.unorderedWrite()).isTrue();
+    }
+  }
+
+  @Test
+  public void allowConcurrentMemtableWrite() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setAllowConcurrentMemtableWrite(boolValue);
+      assertThat(opt.allowConcurrentMemtableWrite()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void enableWriteThreadAdaptiveYield() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setEnableWriteThreadAdaptiveYield(boolValue);
+      assertThat(opt.enableWriteThreadAdaptiveYield()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void writeThreadMaxYieldUsec() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setWriteThreadMaxYieldUsec(longValue);
+      assertThat(opt.writeThreadMaxYieldUsec()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void writeThreadSlowYieldUsec() {
+    try (final Options opt = new Options()) {
+      final long longValue = rand.nextLong();
+      opt.setWriteThreadSlowYieldUsec(longValue);
+      assertThat(opt.writeThreadSlowYieldUsec()).isEqualTo(longValue);
+    }
+  }
+
+  @Test
+  public void skipStatsUpdateOnDbOpen() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setSkipStatsUpdateOnDbOpen(boolValue);
+      assertThat(opt.skipStatsUpdateOnDbOpen()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void walRecoveryMode() {
+    try (final Options opt = new Options()) {
+      for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) {
+        opt.setWalRecoveryMode(walRecoveryMode);
+        assertThat(opt.walRecoveryMode()).isEqualTo(walRecoveryMode);
+      }
+    }
+  }
+
+  @Test
+  public void allow2pc() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setAllow2pc(boolValue);
+      assertThat(opt.allow2pc()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void rowCache() {
+    try (final Options opt = new Options()) {
+      assertThat(opt.rowCache()).isNull();
+
+      try(final Cache lruCache = new LRUCache(1000)) {
+        opt.setRowCache(lruCache);
+        assertThat(opt.rowCache()).isEqualTo(lruCache);
+      }
+
+      try(final Cache clockCache = new ClockCache(1000)) {
+        opt.setRowCache(clockCache);
+        assertThat(opt.rowCache()).isEqualTo(clockCache);
+      }
+    }
+  }
+
+  @Test
+  public void walFilter() {
+    try (final Options opt = new Options()) {
+      assertThat(opt.walFilter()).isNull();
+
+      try (final AbstractWalFilter walFilter = new AbstractWalFilter() {
+        @Override
+        public void columnFamilyLogNumberMap(
+            final Map<Integer, Long> cfLognumber,
+            final Map<String, Integer> cfNameId) {
+          // no-op
+        }
+
+        @Override
+        public LogRecordFoundResult logRecordFound(final long logNumber,
+            final String logFileName, final WriteBatch batch,
+            final WriteBatch newBatch) {
+          return new LogRecordFoundResult(
+              WalProcessingOption.CONTINUE_PROCESSING, false);
+        }
+
+        @Override
+        public String name() {
+          return "test-wal-filter";
+        }
+      }) {
+        opt.setWalFilter(walFilter);
+        assertThat(opt.walFilter()).isEqualTo(walFilter);
+      }
+    }
+  }
+
+  @Test
+  public void failIfOptionsFileError() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setFailIfOptionsFileError(boolValue);
+      assertThat(opt.failIfOptionsFileError()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void dumpMallocStats() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setDumpMallocStats(boolValue);
+      assertThat(opt.dumpMallocStats()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void avoidFlushDuringRecovery() {
+    try (final Options opt = new Options()) {
+      final boolean boolValue = rand.nextBoolean();
+      opt.setAvoidFlushDuringRecovery(boolValue);
+
assertThat(opt.avoidFlushDuringRecovery()).isEqualTo(boolValue); + } + } + + @Test + public void avoidFlushDuringShutdown() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAvoidFlushDuringShutdown(boolValue); + assertThat(opt.avoidFlushDuringShutdown()).isEqualTo(boolValue); + } + } + + + @Test + public void allowIngestBehind() { + try (final Options opt = new Options()) { + assertThat(opt.allowIngestBehind()).isFalse(); + opt.setAllowIngestBehind(true); + assertThat(opt.allowIngestBehind()).isTrue(); + } + } + + @Test + public void twoWriteQueues() { + try (final Options opt = new Options()) { + assertThat(opt.twoWriteQueues()).isFalse(); + opt.setTwoWriteQueues(true); + assertThat(opt.twoWriteQueues()).isTrue(); + } + } + + @Test + public void manualWalFlush() { + try (final Options opt = new Options()) { + assertThat(opt.manualWalFlush()).isFalse(); + opt.setManualWalFlush(true); + assertThat(opt.manualWalFlush()).isTrue(); + } + } + + @Test + public void atomicFlush() { + try (final Options opt = new Options()) { + assertThat(opt.atomicFlush()).isFalse(); + opt.setAtomicFlush(true); + assertThat(opt.atomicFlush()).isTrue(); + } + } + + @Test + public void env() { + try (final Options options = new Options(); + final Env env = Env.getDefault()) { + options.setEnv(env); + assertThat(options.getEnv()).isSameAs(env); + } + } + + @Test + public void linkageOfPrepMethods() { + try (final Options options = new Options()) { + options.optimizeUniversalStyleCompaction(); + options.optimizeUniversalStyleCompaction(4000); + options.optimizeLevelStyleCompaction(); + options.optimizeLevelStyleCompaction(3000); + options.optimizeForPointLookup(10); + options.optimizeForSmallDb(); + options.prepareForBulkLoad(); + } + } + + @Test + public void compressionTypes() { + try (final Options options = new Options()) { + for (final CompressionType compressionType : + CompressionType.values()) { + options.setCompressionType(compressionType); + assertThat(options.compressionType()). + isEqualTo(compressionType); + assertThat(CompressionType.valueOf("NO_COMPRESSION")). 
+            isEqualTo(CompressionType.NO_COMPRESSION);
+      }
+    }
+  }
+
+  @Test
+  public void prepopulateBlobCache() {
+    try (final Options options = new Options()) {
+      for (final PrepopulateBlobCache prepopulateBlobCache : PrepopulateBlobCache.values()) {
+        options.setPrepopulateBlobCache(prepopulateBlobCache);
+        assertThat(options.prepopulateBlobCache()).isEqualTo(prepopulateBlobCache);
+        assertThat(PrepopulateBlobCache.valueOf("PREPOPULATE_BLOB_DISABLE"))
+            .isEqualTo(PrepopulateBlobCache.PREPOPULATE_BLOB_DISABLE);
+      }
+    }
+  }
+
+  @Test
+  public void compressionPerLevel() {
+    try (final Options options = new Options()) {
+      assertThat(options.compressionPerLevel()).isEmpty();
+      List<CompressionType> compressionTypeList =
+          new ArrayList<>();
+      for (int i = 0; i < options.numLevels(); i++) {
+        compressionTypeList.add(CompressionType.NO_COMPRESSION);
+      }
+      options.setCompressionPerLevel(compressionTypeList);
+      compressionTypeList = options.compressionPerLevel();
+      for (final CompressionType compressionType : compressionTypeList) {
+        assertThat(compressionType).isEqualTo(
+            CompressionType.NO_COMPRESSION);
+      }
+    }
+  }
+
+  @Test
+  public void differentCompressionsPerLevel() {
+    try (final Options options = new Options()) {
+      options.setNumLevels(3);
+
+      assertThat(options.compressionPerLevel()).isEmpty();
+      List<CompressionType> compressionTypeList = new ArrayList<>();
+
+      compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION);
+      compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION);
+      compressionTypeList.add(CompressionType.LZ4_COMPRESSION);
+
+      options.setCompressionPerLevel(compressionTypeList);
+      compressionTypeList = options.compressionPerLevel();
+
+      assertThat(compressionTypeList.size()).isEqualTo(3);
+      assertThat(compressionTypeList).
+          containsExactly(
+              CompressionType.BZLIB2_COMPRESSION,
+              CompressionType.SNAPPY_COMPRESSION,
+              CompressionType.LZ4_COMPRESSION);
+
+    }
+  }
+
+  @Test
+  public void bottommostCompressionType() {
+    try (final Options options = new Options()) {
+      assertThat(options.bottommostCompressionType())
+          .isEqualTo(CompressionType.DISABLE_COMPRESSION_OPTION);
+
+      for (final CompressionType compressionType : CompressionType.values()) {
+        options.setBottommostCompressionType(compressionType);
+        assertThat(options.bottommostCompressionType())
+            .isEqualTo(compressionType);
+      }
+    }
+  }
+
+  @Test
+  public void bottommostCompressionOptions() {
+    try (final Options options = new Options();
+         final CompressionOptions bottommostCompressionOptions = new CompressionOptions()
+             .setMaxDictBytes(123)) {
+
+      options.setBottommostCompressionOptions(bottommostCompressionOptions);
+      assertThat(options.bottommostCompressionOptions())
+          .isEqualTo(bottommostCompressionOptions);
+      assertThat(options.bottommostCompressionOptions().maxDictBytes())
+          .isEqualTo(123);
+    }
+  }
+
+  @Test
+  public void compressionOptions() {
+    try (final Options options = new Options();
+         final CompressionOptions compressionOptions = new CompressionOptions()
+             .setMaxDictBytes(123)) {
+
+      options.setCompressionOptions(compressionOptions);
+      assertThat(options.compressionOptions())
+          .isEqualTo(compressionOptions);
+      assertThat(options.compressionOptions().maxDictBytes())
+          .isEqualTo(123);
+    }
+  }
+
+  @Test
+  public void compactionStyles() {
+    try (final Options options = new Options()) {
+      for (final CompactionStyle compactionStyle :
+          CompactionStyle.values()) {
+        options.setCompactionStyle(compactionStyle);
+        assertThat(options.compactionStyle()).
+            isEqualTo(compactionStyle);
+        assertThat(CompactionStyle.valueOf("FIFO")).
+ isEqualTo(CompactionStyle.FIFO); + } + } + } + + @Test + public void maxTableFilesSizeFIFO() { + try (final Options opt = new Options()) { + long longValue = rand.nextLong(); + // Size has to be positive + longValue = (longValue < 0) ? -longValue : longValue; + longValue = (longValue == 0) ? longValue + 1 : longValue; + opt.setMaxTableFilesSizeFIFO(longValue); + assertThat(opt.maxTableFilesSizeFIFO()). + isEqualTo(longValue); + } + } + + @Test + public void rateLimiter() { + try (final Options options = new Options(); + final Options anotherOptions = new Options(); + final RateLimiter rateLimiter = + new RateLimiter(1000, 100 * 1000, 1)) { + options.setRateLimiter(rateLimiter); + // Test with parameter initialization + anotherOptions.setRateLimiter( + new RateLimiter(1000)); + } + } + + @Test + public void sstFileManager() throws RocksDBException { + try (final Options options = new Options(); + final SstFileManager sstFileManager = + new SstFileManager(Env.getDefault())) { + options.setSstFileManager(sstFileManager); + } + } + + @Test + public void shouldSetTestPrefixExtractor() { + try (final Options options = new Options()) { + options.useFixedLengthPrefixExtractor(100); + options.useFixedLengthPrefixExtractor(10); + } + } + + @Test + public void shouldSetTestCappedPrefixExtractor() { + try (final Options options = new Options()) { + options.useCappedPrefixExtractor(100); + options.useCappedPrefixExtractor(10); + } + } + + @Test + public void shouldTestMemTableFactoryName() + throws RocksDBException { + try (final Options options = new Options()) { + options.setMemTableConfig(new VectorMemTableConfig()); + assertThat(options.memTableFactoryName()). + isEqualTo("VectorRepFactory"); + options.setMemTableConfig( + new HashLinkedListMemTableConfig()); + assertThat(options.memTableFactoryName()). + isEqualTo("HashLinkedListRepFactory"); + } + } + + @Test + public void statistics() { + try(final Options options = new Options()) { + final Statistics statistics = options.statistics(); + assertThat(statistics).isNull(); + } + + try(final Statistics statistics = new Statistics(); + final Options options = new Options().setStatistics(statistics); + final Statistics stats = options.statistics()) { + assertThat(stats).isNotNull(); + } + } + + @Test + public void maxWriteBufferNumberToMaintain() { + try (final Options options = new Options()) { + int intValue = rand.nextInt(); + // Size has to be positive + intValue = (intValue < 0) ? -intValue : intValue; + intValue = (intValue == 0) ? intValue + 1 : intValue; + options.setMaxWriteBufferNumberToMaintain(intValue); + assertThat(options.maxWriteBufferNumberToMaintain()). + isEqualTo(intValue); + } + } + + @Test + public void compactionPriorities() { + try (final Options options = new Options()) { + for (final CompactionPriority compactionPriority : + CompactionPriority.values()) { + options.setCompactionPriority(compactionPriority); + assertThat(options.compactionPriority()). + isEqualTo(compactionPriority); + } + } + } + + @Test + public void reportBgIoStats() { + try (final Options options = new Options()) { + final boolean booleanValue = true; + options.setReportBgIoStats(booleanValue); + assertThat(options.reportBgIoStats()). + isEqualTo(booleanValue); + } + } + + @Test + public void ttl() { + try (final Options options = new Options()) { + options.setTtl(1000 * 60); + assertThat(options.ttl()). 
+        isEqualTo(1000 * 60);
+    }
+  }
+
+  @Test
+  public void periodicCompactionSeconds() {
+    try (final Options options = new Options()) {
+      options.setPeriodicCompactionSeconds(1000 * 60);
+      assertThat(options.periodicCompactionSeconds()).isEqualTo(1000 * 60);
+    }
+  }
+
+  @Test
+  public void compactionOptionsUniversal() {
+    try (final Options options = new Options();
+         final CompactionOptionsUniversal optUni = new CompactionOptionsUniversal()
+             .setCompressionSizePercent(7)) {
+      options.setCompactionOptionsUniversal(optUni);
+      assertThat(options.compactionOptionsUniversal()).
+          isEqualTo(optUni);
+      assertThat(options.compactionOptionsUniversal().compressionSizePercent())
+          .isEqualTo(7);
+    }
+  }
+
+  @Test
+  public void compactionOptionsFIFO() {
+    try (final Options options = new Options();
+         final CompactionOptionsFIFO optFifo = new CompactionOptionsFIFO()
+             .setMaxTableFilesSize(2000)) {
+      options.setCompactionOptionsFIFO(optFifo);
+      assertThat(options.compactionOptionsFIFO()).
+          isEqualTo(optFifo);
+      assertThat(options.compactionOptionsFIFO().maxTableFilesSize())
+          .isEqualTo(2000);
+    }
+  }
+
+  @Test
+  public void forceConsistencyChecks() {
+    try (final Options options = new Options()) {
+      final boolean booleanValue = true;
+      options.setForceConsistencyChecks(booleanValue);
+      assertThat(options.forceConsistencyChecks()).
+          isEqualTo(booleanValue);
+    }
+  }
+
+  @Test
+  public void compactionFilter() {
+    try(final Options options = new Options();
+        final RemoveEmptyValueCompactionFilter cf = new RemoveEmptyValueCompactionFilter()) {
+      options.setCompactionFilter(cf);
+      assertThat(options.compactionFilter()).isEqualTo(cf);
+    }
+  }
+
+  @Test
+  public void compactionFilterFactory() {
+    try(final Options options = new Options();
+        final RemoveEmptyValueCompactionFilterFactory cff = new RemoveEmptyValueCompactionFilterFactory()) {
+      options.setCompactionFilterFactory(cff);
+      assertThat(options.compactionFilterFactory()).isEqualTo(cff);
+    }
+  }
+
+  @Test
+  public void compactionThreadLimiter() {
+    try (final Options options = new Options();
+         final ConcurrentTaskLimiter compactionThreadLimiter =
+             new ConcurrentTaskLimiterImpl("name", 3)) {
+      options.setCompactionThreadLimiter(compactionThreadLimiter);
+      assertThat(options.compactionThreadLimiter()).isEqualTo(compactionThreadLimiter);
+    }
+  }
+
+  @Test
+  public void oldDefaults() {
+    try (final Options options = new Options()) {
+      options.oldDefaults(4, 6);
+      assertThat(options.writeBufferSize()).isEqualTo(4 << 20);
+      assertThat(options.compactionPriority()).isEqualTo(CompactionPriority.ByCompensatedSize);
+      assertThat(options.targetFileSizeBase()).isEqualTo(2 * 1048576);
+      assertThat(options.maxBytesForLevelBase()).isEqualTo(10 * 1048576);
+      assertThat(options.softPendingCompactionBytesLimit()).isEqualTo(0);
+      assertThat(options.hardPendingCompactionBytesLimit()).isEqualTo(0);
+      assertThat(options.level0StopWritesTrigger()).isEqualTo(24);
+    }
+  }
+
+  @Test
+  public void optimizeForSmallDbWithCache() {
+    try (final Options options = new Options(); final Cache cache = new LRUCache(1024)) {
+      assertThat(options.optimizeForSmallDb(cache)).isEqualTo(options);
+    }
+  }
+
+  @Test
+  public void cfPaths() {
+    try (final Options options = new Options()) {
+      final List<DbPath> paths = Arrays.asList(
+          new DbPath(Paths.get("test1"), 2 << 25), new DbPath(Paths.get("/test2/path"), 2 << 25));
+      assertThat(options.cfPaths()).isEqualTo(Collections.emptyList());
+      assertThat(options.setCfPaths(paths)).isEqualTo(options);
+      assertThat(options.cfPaths()).isEqualTo(paths);
+    }
+
} + + @Test + public void avoidUnnecessaryBlockingIO() { + try (final Options options = new Options()) { + assertThat(options.avoidUnnecessaryBlockingIO()).isEqualTo(false); + assertThat(options.setAvoidUnnecessaryBlockingIO(true)).isEqualTo(options); + assertThat(options.avoidUnnecessaryBlockingIO()).isEqualTo(true); + } + } + + @Test + public void persistStatsToDisk() { + try (final Options options = new Options()) { + assertThat(options.persistStatsToDisk()).isEqualTo(false); + assertThat(options.setPersistStatsToDisk(true)).isEqualTo(options); + assertThat(options.persistStatsToDisk()).isEqualTo(true); + } + } + + @Test + public void writeDbidToManifest() { + try (final Options options = new Options()) { + assertThat(options.writeDbidToManifest()).isEqualTo(false); + assertThat(options.setWriteDbidToManifest(true)).isEqualTo(options); + assertThat(options.writeDbidToManifest()).isEqualTo(true); + } + } + + @Test + public void logReadaheadSize() { + try (final Options options = new Options()) { + assertThat(options.logReadaheadSize()).isEqualTo(0); + final int size = 1024 * 1024 * 100; + assertThat(options.setLogReadaheadSize(size)).isEqualTo(options); + assertThat(options.logReadaheadSize()).isEqualTo(size); + } + } + + @Test + public void bestEffortsRecovery() { + try (final Options options = new Options()) { + assertThat(options.bestEffortsRecovery()).isEqualTo(false); + assertThat(options.setBestEffortsRecovery(true)).isEqualTo(options); + assertThat(options.bestEffortsRecovery()).isEqualTo(true); + } + } + + @Test + public void maxBgerrorResumeCount() { + try (final Options options = new Options()) { + final int INT_MAX = 2147483647; + assertThat(options.maxBgerrorResumeCount()).isEqualTo(INT_MAX); + assertThat(options.setMaxBgErrorResumeCount(-1)).isEqualTo(options); + assertThat(options.maxBgerrorResumeCount()).isEqualTo(-1); + } + } + + @Test + public void bgerrorResumeRetryInterval() { + try (final Options options = new Options()) { + assertThat(options.bgerrorResumeRetryInterval()).isEqualTo(1000000); + final long newRetryInterval = 24 * 3600 * 1000000L; + assertThat(options.setBgerrorResumeRetryInterval(newRetryInterval)).isEqualTo(options); + assertThat(options.bgerrorResumeRetryInterval()).isEqualTo(newRetryInterval); + } + } + + @Test + public void maxWriteBatchGroupSizeBytes() { + try (final Options options = new Options()) { + assertThat(options.maxWriteBatchGroupSizeBytes()).isEqualTo(1024 * 1024); + final long size = 1024 * 1024 * 1024 * 10L; + assertThat(options.setMaxWriteBatchGroupSizeBytes(size)).isEqualTo(options); + assertThat(options.maxWriteBatchGroupSizeBytes()).isEqualTo(size); + } + } + + @Test + public void skipCheckingSstFileSizesOnDbOpen() { + try (final Options options = new Options()) { + assertThat(options.skipCheckingSstFileSizesOnDbOpen()).isEqualTo(false); + assertThat(options.setSkipCheckingSstFileSizesOnDbOpen(true)).isEqualTo(options); + assertThat(options.skipCheckingSstFileSizesOnDbOpen()).isEqualTo(true); + } + } + + @Test + public void eventListeners() { + final AtomicBoolean wasCalled1 = new AtomicBoolean(); + final AtomicBoolean wasCalled2 = new AtomicBoolean(); + try (final Options options = new Options(); + final AbstractEventListener el1 = + new AbstractEventListener() { + @Override + public void onTableFileDeleted(final TableFileDeletionInfo tableFileDeletionInfo) { + wasCalled1.set(true); + } + }; + final AbstractEventListener el2 = + new AbstractEventListener() { + @Override + public void onMemTableSealed(final MemTableInfo 
memTableInfo) {
+               wasCalled2.set(true);
+             }
+           }) {
+      assertThat(options.setListeners(Arrays.asList(el1, el2))).isEqualTo(options);
+      List<AbstractEventListener> listeners = options.listeners();
+      assertEquals(el1, listeners.get(0));
+      assertEquals(el2, listeners.get(1));
+      options.setListeners(Collections.emptyList());
+      listeners.get(0).onTableFileDeleted(null);
+      assertTrue(wasCalled1.get());
+      listeners.get(1).onMemTableSealed(null);
+      assertTrue(wasCalled2.get());
+      List<AbstractEventListener> listeners2 = options.listeners();
+      assertNotNull(listeners2);
+      assertEquals(0, listeners2.size());
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java
new file mode 100644
index 000000000..b84314eec
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/OptionsUtilTest.java
@@ -0,0 +1,126 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.*;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class OptionsUtilTest {
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = new RocksNativeLibraryResource();
+
+  @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  enum TestAPI { LOAD_LATEST_OPTIONS, LOAD_OPTIONS_FROM_FILE }
+
+  @Test
+  public void loadLatestOptions() throws RocksDBException {
+    verifyOptions(TestAPI.LOAD_LATEST_OPTIONS);
+  }
+
+  @Test
+  public void loadOptionsFromFile() throws RocksDBException {
+    verifyOptions(TestAPI.LOAD_OPTIONS_FROM_FILE);
+  }
+
+  @Test
+  public void getLatestOptionsFileName() throws RocksDBException {
+    final String dbPath = dbFolder.getRoot().getAbsolutePath();
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbPath)) {
+      assertThat(db).isNotNull();
+    }
+
+    String fName = OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault());
+    assertThat(fName).isNotNull();
+    assert(fName.startsWith("OPTIONS-"));
+    // System.out.println("latest options fileName: " + fName);
+  }
+
+  private void verifyOptions(TestAPI apiType) throws RocksDBException {
+    final String dbPath = dbFolder.getRoot().getAbsolutePath();
+    final Options options = new Options()
+        .setCreateIfMissing(true)
+        .setParanoidChecks(false)
+        .setMaxOpenFiles(478)
+        .setDelayedWriteRate(1234567L);
+    final ColumnFamilyOptions baseDefaultCFOpts = new ColumnFamilyOptions();
+    final byte[] secondCFName = "new_cf".getBytes();
+    final ColumnFamilyOptions baseSecondCFOpts =
+        new ColumnFamilyOptions()
+            .setWriteBufferSize(70 * 1024)
+            .setMaxWriteBufferNumber(7)
+            .setMaxBytesForLevelBase(53 * 1024 * 1024)
+            .setLevel0FileNumCompactionTrigger(3)
+            .setLevel0SlowdownWritesTrigger(51)
+            .setBottommostCompressionType(CompressionType.ZSTD_COMPRESSION);
+
+    // Create a database with a new column family
+    try (final RocksDB db = RocksDB.open(options, dbPath)) {
+      assertThat(db).isNotNull();
+
+      // create column family
+      try (final ColumnFamilyHandle columnFamilyHandle =
+               db.createColumnFamily(new ColumnFamilyDescriptor(secondCFName, baseSecondCFOpts))) {
+        assert(columnFamilyHandle != null);
+      }
+    }
+
+    // Read the options back and verify
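+    // (both load paths reconstruct the DBOptions and each column family's
+    //  options from the OPTIONS file that RocksDB persists in the DB dir)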
+    DBOptions dbOptions = new DBOptions();
+    final List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
+    String path = dbPath;
+    if (apiType == TestAPI.LOAD_LATEST_OPTIONS) {
+      OptionsUtil.loadLatestOptions(path, Env.getDefault(), dbOptions, cfDescs, false);
+    } else if (apiType == TestAPI.LOAD_OPTIONS_FROM_FILE) {
+      path = dbPath + "/" + OptionsUtil.getLatestOptionsFileName(dbPath, Env.getDefault());
+      OptionsUtil.loadOptionsFromFile(path, Env.getDefault(), dbOptions, cfDescs, false);
+    }
+
+    assertThat(dbOptions.createIfMissing()).isEqualTo(options.createIfMissing());
+    assertThat(dbOptions.paranoidChecks()).isEqualTo(options.paranoidChecks());
+    assertThat(dbOptions.maxOpenFiles()).isEqualTo(options.maxOpenFiles());
+    assertThat(dbOptions.delayedWriteRate()).isEqualTo(options.delayedWriteRate());
+
+    assertThat(cfDescs.size()).isEqualTo(2);
+    assertThat(cfDescs.get(0)).isNotNull();
+    assertThat(cfDescs.get(1)).isNotNull();
+    assertThat(cfDescs.get(0).getName()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY);
+    assertThat(cfDescs.get(1).getName()).isEqualTo(secondCFName);
+
+    ColumnFamilyOptions defaultCFOpts = cfDescs.get(0).getOptions();
+    assertThat(defaultCFOpts.writeBufferSize()).isEqualTo(baseDefaultCFOpts.writeBufferSize());
+    assertThat(defaultCFOpts.maxWriteBufferNumber())
+        .isEqualTo(baseDefaultCFOpts.maxWriteBufferNumber());
+    assertThat(defaultCFOpts.maxBytesForLevelBase())
+        .isEqualTo(baseDefaultCFOpts.maxBytesForLevelBase());
+    assertThat(defaultCFOpts.level0FileNumCompactionTrigger())
+        .isEqualTo(baseDefaultCFOpts.level0FileNumCompactionTrigger());
+    assertThat(defaultCFOpts.level0SlowdownWritesTrigger())
+        .isEqualTo(baseDefaultCFOpts.level0SlowdownWritesTrigger());
+    assertThat(defaultCFOpts.bottommostCompressionType())
+        .isEqualTo(baseDefaultCFOpts.bottommostCompressionType());
+
+    ColumnFamilyOptions secondCFOpts = cfDescs.get(1).getOptions();
+    assertThat(secondCFOpts.writeBufferSize()).isEqualTo(baseSecondCFOpts.writeBufferSize());
+    assertThat(secondCFOpts.maxWriteBufferNumber())
+        .isEqualTo(baseSecondCFOpts.maxWriteBufferNumber());
+    assertThat(secondCFOpts.maxBytesForLevelBase())
+        .isEqualTo(baseSecondCFOpts.maxBytesForLevelBase());
+    assertThat(secondCFOpts.level0FileNumCompactionTrigger())
+        .isEqualTo(baseSecondCFOpts.level0FileNumCompactionTrigger());
+    assertThat(secondCFOpts.level0SlowdownWritesTrigger())
+        .isEqualTo(baseSecondCFOpts.level0SlowdownWritesTrigger());
+    assertThat(secondCFOpts.bottommostCompressionType())
+        .isEqualTo(baseSecondCFOpts.bottommostCompressionType());
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
new file mode 100644
index 000000000..c813dbbb4
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java
@@ -0,0 +1,89 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class PlainTableConfigTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void keySize() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setKeySize(5); + assertThat(plainTableConfig.keySize()). + isEqualTo(5); + } + + @Test + public void bloomBitsPerKey() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setBloomBitsPerKey(11); + assertThat(plainTableConfig.bloomBitsPerKey()). + isEqualTo(11); + } + + @Test + public void hashTableRatio() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setHashTableRatio(0.95); + assertThat(plainTableConfig.hashTableRatio()). + isEqualTo(0.95); + } + + @Test + public void indexSparseness() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setIndexSparseness(18); + assertThat(plainTableConfig.indexSparseness()). + isEqualTo(18); + } + + @Test + public void hugePageTlbSize() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setHugePageTlbSize(1); + assertThat(plainTableConfig.hugePageTlbSize()). + isEqualTo(1); + } + + @Test + public void encodingType() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setEncodingType(EncodingType.kPrefix); + assertThat(plainTableConfig.encodingType()).isEqualTo( + EncodingType.kPrefix); + } + + @Test + public void fullScanMode() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setFullScanMode(true); + assertThat(plainTableConfig.fullScanMode()).isTrue(); } + + @Test + public void storeIndexInFile() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setStoreIndexInFile(true); + assertThat(plainTableConfig.storeIndexInFile()). + isTrue(); + } + + @Test + public void plainTableConfig() { + try(final Options opt = new Options()) { + final PlainTableConfig plainTableConfig = new PlainTableConfig(); + opt.setTableFormatConfig(plainTableConfig); + assertThat(opt.tableFactoryName()).isEqualTo("PlainTable"); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java b/src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java new file mode 100644 index 000000000..80ea4d197 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java @@ -0,0 +1,58 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import java.util.Random; + +/** + * Helper class to get the appropriate Random class instance dependent + * on the current platform architecture (32bit vs 64bit) + */ +public class PlatformRandomHelper { + /** + * Determine if OS is 32-Bit/64-Bit + * + * @return boolean value indicating if operating system is 64 Bit. 
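+   * (On Windows this checks the ProgramFiles(x86) environment variable;
+   * elsewhere it inspects the {@code os.arch} system property.)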
+ */ + public static boolean isOs64Bit(){ + final boolean is64Bit; + if (System.getProperty("os.name").contains("Windows")) { + is64Bit = (System.getenv("ProgramFiles(x86)") != null); + } else { + is64Bit = (System.getProperty("os.arch").contains("64")); + } + return is64Bit; + } + + /** + * Factory to get a platform specific Random instance + * + * @return {@link java.util.Random} instance. + */ + public static Random getPlatformSpecificRandomFactory(){ + if (isOs64Bit()) { + return new Random(); + } + return new Random32Bit(); + } + + /** + * Random32Bit is a class which overrides {@code nextLong} to + * provide random numbers which fit in size_t. This workaround + * is necessary because there is no unsigned_int < Java 8 + */ + private static class Random32Bit extends Random { + @Override + public long nextLong(){ + return this.nextInt(Integer.MAX_VALUE); + } + } + + /** + * Utility class constructor + */ + private PlatformRandomHelper() { } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java new file mode 100644 index 000000000..471ef0728 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/PutMultiplePartsTest.java @@ -0,0 +1,164 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@RunWith(Parameterized.class) +public class PutMultiplePartsTest { + @Parameterized.Parameters + public static List data() { + return Arrays.asList(2, 3, 250, 20000); + } + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + private final int numParts; + + public PutMultiplePartsTest(final Integer numParts) { + this.numParts = numParts; + } + + @Test + public void putUntracked() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB txnDB = + TransactionDB.open(options, txnDbOptions, dbFolder.getRoot().getAbsolutePath())) { + try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) { + final byte[][] keys = generateItems("key", ":", numParts); + final byte[][] values = generateItems("value", "", numParts); + transaction.putUntracked(keys, values); + transaction.commit(); + } + txnDB.syncWal(); + } + + validateResults(); + } + + @Test + public void put() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB txnDB = + TransactionDB.open(options, txnDbOptions, dbFolder.getRoot().getAbsolutePath())) { + try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) { + final byte[][] keys = generateItems("key", ":", numParts); + final byte[][] values = generateItems("value", "", numParts); + transaction.put(keys, values); + transaction.commit(); + } + txnDB.syncWal(); + } + + validateResults(); + } + + @Test 
+  public void putUntrackedCF() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB txnDB =
+             TransactionDB.open(options, txnDbOptions, dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyHandle columnFamilyHandle =
+             txnDB.createColumnFamily(new ColumnFamilyDescriptor("cfTest".getBytes()))) {
+      try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) {
+        final byte[][] keys = generateItems("key", ":", numParts);
+        final byte[][] values = generateItems("value", "", numParts);
+        transaction.putUntracked(columnFamilyHandle, keys, values);
+        transaction.commit();
+      }
+      txnDB.syncWal();
+    }
+
+    validateResultsCF();
+  }
+  @Test
+  public void putCF() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final TransactionDBOptions txnDbOptions = new TransactionDBOptions();
+         final TransactionDB txnDB =
+             TransactionDB.open(options, txnDbOptions, dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyHandle columnFamilyHandle =
+             txnDB.createColumnFamily(new ColumnFamilyDescriptor("cfTest".getBytes()))) {
+      try (final Transaction transaction = txnDB.beginTransaction(new WriteOptions())) {
+        final byte[][] keys = generateItems("key", ":", numParts);
+        final byte[][] values = generateItems("value", "", numParts);
+        transaction.put(columnFamilyHandle, keys, values);
+        transaction.commit();
+      }
+      txnDB.syncWal();
+    }
+
+    validateResultsCF();
+  }
+
+  private void validateResults() throws RocksDBException {
+    try (final RocksDB db = RocksDB.open(new Options(), dbFolder.getRoot().getAbsolutePath())) {
+      final List<byte[]> keys = generateItemsAsList("key", ":", numParts);
+      final byte[][] values = generateItems("value", "", numParts);
+
+      StringBuilder singleKey = new StringBuilder();
+      for (int i = 0; i < numParts; i++) {
+        singleKey.append(new String(keys.get(i), StandardCharsets.UTF_8));
+      }
+      final byte[] result = db.get(singleKey.toString().getBytes());
+      StringBuilder singleValue = new StringBuilder();
+      for (int i = 0; i < numParts; i++) {
+        singleValue.append(new String(values[i], StandardCharsets.UTF_8));
+      }
+      assertThat(result).isEqualTo(singleValue.toString().getBytes());
+    }
+  }
+
+  private void validateResultsCF() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
+    columnFamilyDescriptors.add(new ColumnFamilyDescriptor("cfTest".getBytes()));
+    columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+    try (final RocksDB db = RocksDB.open(new DBOptions(), dbFolder.getRoot().getAbsolutePath(),
+             columnFamilyDescriptors, columnFamilyHandles)) {
+      final List<byte[]> keys = generateItemsAsList("key", ":", numParts);
+      final byte[][] values = generateItems("value", "", numParts);
+
+      StringBuilder singleKey = new StringBuilder();
+      for (int i = 0; i < numParts; i++) {
+        singleKey.append(new String(keys.get(i), StandardCharsets.UTF_8));
+      }
+      final byte[] result = db.get(columnFamilyHandles.get(0), singleKey.toString().getBytes());
+      StringBuilder singleValue = new StringBuilder();
+      for (int i = 0; i < numParts; i++) {
+        singleValue.append(new String(values[i], StandardCharsets.UTF_8));
+      }
+      assertThat(result).isEqualTo(singleValue.toString().getBytes());
+    }
+  }
+
+  private byte[][] generateItems(final String prefix, final String suffix, final int numItems) {
+    return generateItemsAsList(prefix, suffix, numItems).toArray(new byte[0][0]);
+  }
+
+  private List<byte[]> generateItemsAsList(
+      final String prefix, final String suffix, final int numItems) {
+    final List<byte[]> items = new ArrayList<>();
+    for (int i = 0; i < numItems; i++) {
+      items.add((prefix + i + suffix).getBytes());
+    }
+    return items;
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java
new file mode 100644
index 000000000..e7d6e6c49
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/RateLimiterTest.java
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.rocksdb.RateLimiter.*;
+
+public class RateLimiterTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Test
+  public void bytesPerSecond() {
+    try(final RateLimiter rateLimiter =
+            new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+                DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
+      assertThat(rateLimiter.getBytesPerSecond()).isGreaterThan(0);
+      rateLimiter.setBytesPerSecond(2000);
+      assertThat(rateLimiter.getBytesPerSecond()).isGreaterThan(0);
+    }
+  }
+
+  @Test
+  public void getSingleBurstBytes() {
+    try(final RateLimiter rateLimiter =
+            new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+                DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
+      assertThat(rateLimiter.getSingleBurstBytes()).isEqualTo(100);
+    }
+  }
+
+  @Test
+  public void getTotalBytesThrough() {
+    try(final RateLimiter rateLimiter =
+            new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+                DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
+      assertThat(rateLimiter.getTotalBytesThrough()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  public void getTotalRequests() {
+    try(final RateLimiter rateLimiter =
+            new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+                DEFAULT_FAIRNESS, DEFAULT_MODE, DEFAULT_AUTOTUNE)) {
+      assertThat(rateLimiter.getTotalRequests()).isEqualTo(0);
+    }
+  }
+
+  @Test
+  public void autoTune() {
+    try(final RateLimiter rateLimiter =
+            new RateLimiter(1000, DEFAULT_REFILL_PERIOD_MICROS,
+                DEFAULT_FAIRNESS, DEFAULT_MODE, true)) {
+      assertThat(rateLimiter.getBytesPerSecond()).isGreaterThan(0);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java
new file mode 100644
index 000000000..5b40a5df1
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java
@@ -0,0 +1,234 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
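+//
+// A minimal usage sketch (the path and key are illustrative): a database
+// opened with RocksDB.openReadOnly serves reads normally, while any mutation
+// is rejected with a RocksDBException, which is what the tests below assert:
+//
+//   try (final RocksDB rDb = RocksDB.openReadOnly(path)) {
+//     final byte[] value = rDb.get("key".getBytes());  // reads succeed
+//     rDb.put("key".getBytes(), value);                // throws RocksDBException
+//   }
+//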
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ReadOnlyTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void readOnlyOpen() throws RocksDBException {
+    try (final Options options = new Options()
+             .setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      db.put("key".getBytes(), "value".getBytes());
+    }
+    try (final RocksDB db = RocksDB.openReadOnly(dbFolder.getRoot().getAbsolutePath())) {
+      assertThat("value").isEqualTo(new String(db.get("key".getBytes())));
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+      cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
+      final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+      try (final RocksDB db = RocksDB.open(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors, columnFamilyHandleList)) {
+        columnFamilyHandleList.add(
+            db.createColumnFamily(new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpts)));
+        columnFamilyHandleList.add(
+            db.createColumnFamily(new ColumnFamilyDescriptor("new_cf2".getBytes(), cfOpts)));
+        db.put(columnFamilyHandleList.get(2), "key2".getBytes(), "value2".getBytes());
+      }
+
+      columnFamilyHandleList.clear();
+      try (final RocksDB db = RocksDB.openReadOnly(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors, columnFamilyHandleList)) {
+        assertThat(db.get("key2".getBytes())).isNull();
+        assertThat(db.get(columnFamilyHandleList.get(0), "key2".getBytes())).isNull();
+      }
+
+      cfDescriptors.clear();
+      cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
+      cfDescriptors.add(new ColumnFamilyDescriptor("new_cf2".getBytes(), cfOpts));
+      columnFamilyHandleList.clear();
+      try (final RocksDB db = RocksDB.openReadOnly(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors, columnFamilyHandleList)) {
+        assertThat(new String(db.get(columnFamilyHandleList.get(1), "key2".getBytes())))
+            .isEqualTo("value2");
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failToWriteInReadOnly() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+        // no-op
+      }
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors =
+          Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
+
+      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = new ArrayList<>();
+      try (final RocksDB rDb = RocksDB.openReadOnly(dbFolder.getRoot().getAbsolutePath(),
+               cfDescriptors, readOnlyColumnFamilyHandleList)) {
+        // test that put fails in readonly mode
+        rDb.put("key".getBytes(), "value".getBytes());
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failToCFWriteInReadOnly() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      //no-op
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+      );
+      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+          new ArrayList<>();
+      try (final RocksDB rDb = RocksDB.openReadOnly(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+               readOnlyColumnFamilyHandleList)) {
+        rDb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(), "value".getBytes());
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failToRemoveInReadOnly() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      //no-op
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+      );
+
+      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+          new ArrayList<>();
+
+      try (final RocksDB rDb = RocksDB.openReadOnly(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+               readOnlyColumnFamilyHandleList)) {
+        rDb.delete("key".getBytes());
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failToCFRemoveInReadOnly() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      //no-op
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+      );
+
+      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+          new ArrayList<>();
+      try (final RocksDB rDb = RocksDB.openReadOnly(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+               readOnlyColumnFamilyHandleList)) {
+        rDb.delete(readOnlyColumnFamilyHandleList.get(0),
+            "key".getBytes());
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failToWriteBatchReadOnly() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      //no-op
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+      );
+
+      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+          new ArrayList<>();
+      try (final RocksDB rDb = RocksDB.openReadOnly(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+               readOnlyColumnFamilyHandleList);
+           final WriteBatch wb = new WriteBatch();
+           final WriteOptions wOpts = new WriteOptions()) {
+        wb.put("key".getBytes(), "value".getBytes());
+        rDb.write(wOpts, wb);
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void failToCFWriteBatchReadOnly() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      //no-op
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)
+      );
+
+      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList =
+          new ArrayList<>();
+      try (final RocksDB rDb = RocksDB.openReadOnly(
+               dbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+               readOnlyColumnFamilyHandleList);
+           final WriteBatch wb = new WriteBatch();
+           final WriteOptions wOpts = new WriteOptions()) {
+        wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(),
+            "value".getBytes());
+        rDb.write(wOpts, wb);
+      }
+    }
+  }
+
+  @Test(expected = RocksDBException.class)
+  public void errorIfWalFileExists() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+      // no-op
+    }
+
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors =
+          Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
+
+      final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = new ArrayList<>();
+      try (final DBOptions options = new DBOptions();
+           final RocksDB rDb = RocksDB.openReadOnly(options, dbFolder.getRoot().getAbsolutePath(),
+               cfDescriptors, readOnlyColumnFamilyHandleList, true);) {
+        // no-op... should have raised an error as errorIfWalFileExists=true
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java
new file mode 100644
index 000000000..156dd3730
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java
@@ -0,0 +1,375 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.Random;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ReadOptionsTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  @Test
+  public void altConstructor() {
+    try (final ReadOptions opt = new ReadOptions(true, true)) {
+      assertThat(opt.verifyChecksums()).isTrue();
+      assertThat(opt.fillCache()).isTrue();
+    }
+  }
+
+  @Test
+  public void copyConstructor() {
+    try (final ReadOptions opt = new ReadOptions()) {
+      opt.setVerifyChecksums(false);
+      opt.setFillCache(false);
+      opt.setIterateUpperBound(buildRandomSlice());
+      opt.setIterateLowerBound(buildRandomSlice());
+      opt.setTimestamp(buildRandomSlice());
+      opt.setIterStartTs(buildRandomSlice());
+      try (final ReadOptions other = new ReadOptions(opt)) {
+        assertThat(opt.verifyChecksums()).isEqualTo(other.verifyChecksums());
+        assertThat(opt.fillCache()).isEqualTo(other.fillCache());
+        assertThat(Arrays.equals(opt.iterateUpperBound().data(), other.iterateUpperBound().data())).isTrue();
+        assertThat(Arrays.equals(opt.iterateLowerBound().data(), other.iterateLowerBound().data())).isTrue();
+        assertThat(Arrays.equals(opt.timestamp().data(), other.timestamp().data())).isTrue();
+        assertThat(Arrays.equals(opt.iterStartTs().data(), other.iterStartTs().data())).isTrue();
+      }
+    }
+  }
+
+  @Test
+  public void verifyChecksum() {
+    try (final ReadOptions opt = new ReadOptions()) {
+      final Random rand = new Random();
+      final boolean boolValue = rand.nextBoolean();
+      opt.setVerifyChecksums(boolValue);
+      assertThat(opt.verifyChecksums()).isEqualTo(boolValue);
+    }
+  }
+
+  @Test
+  public void fillCache() {
+    try (final ReadOptions opt = new ReadOptions()) {
+      final Random rand = new Random();
final boolean boolValue = rand.nextBoolean(); + opt.setFillCache(boolValue); + assertThat(opt.fillCache()).isEqualTo(boolValue); + } + } + + @Test + public void tailing() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final boolean boolValue = rand.nextBoolean(); + opt.setTailing(boolValue); + assertThat(opt.tailing()).isEqualTo(boolValue); + } + } + + @Test + public void snapshot() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setSnapshot(null); + assertThat(opt.snapshot()).isNull(); + } + } + + @Test + public void readTier() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setReadTier(ReadTier.BLOCK_CACHE_TIER); + assertThat(opt.readTier()).isEqualTo(ReadTier.BLOCK_CACHE_TIER); + } + } + + @SuppressWarnings("deprecated") + @Test + public void managed() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setManaged(true); + assertThat(opt.managed()).isTrue(); + } + } + + @Test + public void totalOrderSeek() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setTotalOrderSeek(true); + assertThat(opt.totalOrderSeek()).isTrue(); + } + } + + @Test + public void prefixSameAsStart() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setPrefixSameAsStart(true); + assertThat(opt.prefixSameAsStart()).isTrue(); + } + } + + @Test + public void pinData() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setPinData(true); + assertThat(opt.pinData()).isTrue(); + } + } + + @Test + public void backgroundPurgeOnIteratorCleanup() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setBackgroundPurgeOnIteratorCleanup(true); + assertThat(opt.backgroundPurgeOnIteratorCleanup()).isTrue(); + } + } + + @Test + public void readaheadSize() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final int intValue = rand.nextInt(2147483647); + opt.setReadaheadSize(intValue); + assertThat(opt.readaheadSize()).isEqualTo(intValue); + } + } + + @Test + public void ignoreRangeDeletions() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setIgnoreRangeDeletions(true); + assertThat(opt.ignoreRangeDeletions()).isTrue(); + } + } + + @Test + public void iterateUpperBound() { + try (final ReadOptions opt = new ReadOptions()) { + Slice upperBound = buildRandomSlice(); + opt.setIterateUpperBound(upperBound); + assertThat(Arrays.equals(upperBound.data(), opt.iterateUpperBound().data())).isTrue(); + opt.setIterateUpperBound(null); + assertThat(opt.iterateUpperBound()).isNull(); + } + } + + @Test + public void iterateUpperBoundNull() { + try (final ReadOptions opt = new ReadOptions()) { + assertThat(opt.iterateUpperBound()).isNull(); + } + } + + @Test + public void iterateLowerBound() { + try (final ReadOptions opt = new ReadOptions()) { + Slice lowerBound = buildRandomSlice(); + opt.setIterateLowerBound(lowerBound); + assertThat(Arrays.equals(lowerBound.data(), opt.iterateLowerBound().data())).isTrue(); + opt.setIterateLowerBound(null); + assertThat(opt.iterateLowerBound()).isNull(); + } + } + + @Test + public void iterateLowerBoundNull() { + try (final ReadOptions opt = new ReadOptions()) { + assertThat(opt.iterateLowerBound()).isNull(); + } + } + + @Test + public void tableFilter() { + try (final ReadOptions opt = new ReadOptions(); + final AbstractTableFilter allTablesFilter = new AllTablesFilter()) { + opt.setTableFilter(allTablesFilter); + } + } + + @Test + public void autoPrefixMode() { + try (final ReadOptions opt = new ReadOptions()) { + 
opt.setAutoPrefixMode(true); + assertThat(opt.autoPrefixMode()).isTrue(); + } + } + + @Test + public void timestamp() { + try (final ReadOptions opt = new ReadOptions()) { + Slice timestamp = buildRandomSlice(); + opt.setTimestamp(timestamp); + assertThat(Arrays.equals(timestamp.data(), opt.timestamp().data())).isTrue(); + opt.setTimestamp(null); + assertThat(opt.timestamp()).isNull(); + } + } + + @Test + public void iterStartTs() { + try (final ReadOptions opt = new ReadOptions()) { + Slice itertStartTsSlice = buildRandomSlice(); + opt.setIterStartTs(itertStartTsSlice); + assertThat(Arrays.equals(itertStartTsSlice.data(), opt.iterStartTs().data())).isTrue(); + opt.setIterStartTs(null); + assertThat(opt.iterStartTs()).isNull(); + } + } + + @Test + public void deadline() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setDeadline(1999l); + assertThat(opt.deadline()).isEqualTo(1999l); + } + } + + @Test + public void ioTimeout() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setIoTimeout(34555l); + assertThat(opt.ioTimeout()).isEqualTo(34555l); + } + } + + @Test + public void valueSizeSoftLimit() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setValueSizeSoftLimit(12134324l); + assertThat(opt.valueSizeSoftLimit()).isEqualTo(12134324l); + } + } + + @Test + public void failSetVerifyChecksumUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setVerifyChecksums(true); + } + } + + @Test + public void failVerifyChecksumUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.verifyChecksums(); + } + } + + @Test + public void failSetFillCacheUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setFillCache(true); + } + } + + @Test + public void failFillCacheUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.fillCache(); + } + } + + @Test + public void failSetTailingUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setTailing(true); + } + } + + @Test + public void failTailingUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.tailing(); + } + } + + @Test + public void failSetSnapshotUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setSnapshot(null); + } + } + + @Test + public void failSnapshotUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.snapshot(); + } + } + + @Test + public void failSetIterateUpperBoundUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setIterateUpperBound(null); + } + } + + @Test + public void failIterateUpperBoundUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.iterateUpperBound(); + } + } + + @Test + public void failSetIterateLowerBoundUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setIterateLowerBound(null); + } + } + + @Test + public void failIterateLowerBoundUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.iterateLowerBound(); + } + } + + private 
ReadOptions setupUninitializedReadOptions( + ExpectedException exception) { + final ReadOptions readOptions = new ReadOptions(); + readOptions.close(); + exception.expect(AssertionError.class); + return readOptions; + } + + private Slice buildRandomSlice() { + final Random rand = new Random(); + byte[] sliceBytes = new byte[rand.nextInt(100) + 1]; + rand.nextBytes(sliceBytes); + return new Slice(sliceBytes); + } + + private static class AllTablesFilter extends AbstractTableFilter { + @Override + public boolean filter(final TableProperties tableProperties) { + return true; + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java new file mode 100644 index 000000000..d3bd4ece7 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBExceptionTest.java @@ -0,0 +1,115 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import org.rocksdb.Status.Code; +import org.rocksdb.Status.SubCode; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.fail; + +public class RocksDBExceptionTest { + + @Test + public void exception() { + try { + raiseException(); + } catch(final RocksDBException e) { + assertThat(e.getStatus()).isNull(); + assertThat(e.getMessage()).isEqualTo("test message"); + return; + } + fail(); + } + + @Test + public void exceptionWithStatusCode() { + try { + raiseExceptionWithStatusCode(); + } catch(final RocksDBException e) { + assertThat(e.getStatus()).isNotNull(); + assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported); + assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None); + assertThat(e.getStatus().getState()).isNull(); + assertThat(e.getMessage()).isEqualTo("test message"); + return; + } + fail(); + } + + @Test + public void exceptionNoMsgWithStatusCode() { + try { + raiseExceptionNoMsgWithStatusCode(); + } catch(final RocksDBException e) { + assertThat(e.getStatus()).isNotNull(); + assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported); + assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None); + assertThat(e.getStatus().getState()).isNull(); + assertThat(e.getMessage()).isEqualTo(Code.NotSupported.name()); + return; + } + fail(); + } + + @Test + public void exceptionWithStatusCodeSubCode() { + try { + raiseExceptionWithStatusCodeSubCode(); + } catch(final RocksDBException e) { + assertThat(e.getStatus()).isNotNull(); + assertThat(e.getStatus().getCode()).isEqualTo(Code.TimedOut); + assertThat(e.getStatus().getSubCode()) + .isEqualTo(Status.SubCode.LockTimeout); + assertThat(e.getStatus().getState()).isNull(); + assertThat(e.getMessage()).isEqualTo("test message"); + return; + } + fail(); + } + + @Test + public void exceptionNoMsgWithStatusCodeSubCode() { + try { + raiseExceptionNoMsgWithStatusCodeSubCode(); + } catch(final RocksDBException e) { + assertThat(e.getStatus()).isNotNull(); + assertThat(e.getStatus().getCode()).isEqualTo(Code.TimedOut); + assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.LockTimeout); + assertThat(e.getStatus().getState()).isNull(); + assertThat(e.getMessage()).isEqualTo(Code.TimedOut.name() + + "(" + SubCode.LockTimeout.name() + ")"); + return; + } + fail(); + } + + @Test + public void 
exceptionWithStatusCodeState() { + try { + raiseExceptionWithStatusCodeState(); + } catch(final RocksDBException e) { + assertThat(e.getStatus()).isNotNull(); + assertThat(e.getStatus().getCode()).isEqualTo(Code.NotSupported); + assertThat(e.getStatus().getSubCode()).isEqualTo(SubCode.None); + assertThat(e.getStatus().getState()).isNotNull(); + assertThat(e.getMessage()).isEqualTo("test message"); + return; + } + fail(); + } + + private native void raiseException() throws RocksDBException; + private native void raiseExceptionWithStatusCode() throws RocksDBException; + private native void raiseExceptionNoMsgWithStatusCode() throws RocksDBException; + private native void raiseExceptionWithStatusCodeSubCode() + throws RocksDBException; + private native void raiseExceptionNoMsgWithStatusCodeSubCode() + throws RocksDBException; + private native void raiseExceptionWithStatusCodeState() + throws RocksDBException; +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java new file mode 100644 index 000000000..422bed40c --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java @@ -0,0 +1,1695 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import org.junit.*; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; + +import java.nio.ByteBuffer; +import java.util.*; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.fail; + +public class RocksDBTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + public static final Random rand = PlatformRandomHelper. 
+      getPlatformSpecificRandomFactory();
+
+  @Test
+  public void open() throws RocksDBException {
+    try (final RocksDB db =
+             RocksDB.open(dbFolder.getRoot().getAbsolutePath())) {
+      assertThat(db).isNotNull();
+    }
+  }
+
+  @Test
+  public void open_opt() throws RocksDBException {
+    try (final Options opt = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(opt,
+             dbFolder.getRoot().getAbsolutePath())) {
+      assertThat(db).isNotNull();
+    }
+  }
+
+  @Test
+  public void openWhenOpen() throws RocksDBException {
+    final String dbPath = dbFolder.getRoot().getAbsolutePath();
+
+    try (final RocksDB db1 = RocksDB.open(dbPath)) {
+      try (final RocksDB db2 = RocksDB.open(dbPath)) {
+        fail("Should have thrown an exception when opening the same db twice");
+      } catch (final RocksDBException e) {
+        assertThat(e.getStatus().getCode()).isEqualTo(Status.Code.IOError);
+        assertThat(e.getStatus().getSubCode()).isEqualTo(Status.SubCode.None);
+        assertThat(e.getStatus().getState()).contains("lock ");
+      }
+    }
+  }
+
+  @Test
+  public void createColumnFamily() throws RocksDBException {
+    final byte[] col1Name = "col1".getBytes(UTF_8);
+
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+    ) {
+      try (final ColumnFamilyHandle col1 =
+               db.createColumnFamily(new ColumnFamilyDescriptor(col1Name, cfOpts))) {
+        assertThat(col1).isNotNull();
+        assertThat(col1.getName()).isEqualTo(col1Name);
+      }
+    }
+
+    final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+             Arrays.asList(
+                 new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+                 new ColumnFamilyDescriptor(col1Name)),
+             cfHandles)) {
+      try {
+        assertThat(cfHandles.size()).isEqualTo(2);
+        assertThat(cfHandles.get(1)).isNotNull();
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle :
+            cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+  }
+
+
+  @Test
+  public void createColumnFamilies() throws RocksDBException {
+    final byte[] col1Name = "col1".getBytes(UTF_8);
+    final byte[] col2Name = "col2".getBytes(UTF_8);
+
+    List<ColumnFamilyHandle> cfHandles;
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+    ) {
+      cfHandles =
+          db.createColumnFamilies(cfOpts, Arrays.asList(col1Name, col2Name));
+      try {
+        assertThat(cfHandles).isNotNull();
+        assertThat(cfHandles.size()).isEqualTo(2);
+        assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name);
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle : cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+
+    cfHandles = new ArrayList<>();
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+             Arrays.asList(
+                 new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+                 new ColumnFamilyDescriptor(col1Name),
+                 new ColumnFamilyDescriptor(col2Name)),
+             cfHandles)) {
+      try {
+        assertThat(cfHandles.size()).isEqualTo(3);
+        assertThat(cfHandles.get(1)).isNotNull();
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+        assertThat(cfHandles.get(2)).isNotNull();
+        assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle : cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void createColumnFamiliesfromDescriptors() throws RocksDBException {
+    final byte[] col1Name = "col1".getBytes(UTF_8);
+    final byte[] col2Name = "col2".getBytes(UTF_8);
+
+    List<ColumnFamilyHandle> cfHandles;
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
+    ) {
+      cfHandles =
+          db.createColumnFamilies(Arrays.asList(
+              new ColumnFamilyDescriptor(col1Name, cfOpts),
+              new ColumnFamilyDescriptor(col2Name, cfOpts)));
+      try {
+        assertThat(cfHandles).isNotNull();
+        assertThat(cfHandles.size()).isEqualTo(2);
+        assertThat(cfHandles.get(0).getName()).isEqualTo(col1Name);
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col2Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle : cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+
+    cfHandles = new ArrayList<>();
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(),
+             Arrays.asList(
+                 new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+                 new ColumnFamilyDescriptor(col1Name),
+                 new ColumnFamilyDescriptor(col2Name)),
+             cfHandles)) {
+      try {
+        assertThat(cfHandles.size()).isEqualTo(3);
+        assertThat(cfHandles.get(1)).isNotNull();
+        assertThat(cfHandles.get(1).getName()).isEqualTo(col1Name);
+        assertThat(cfHandles.get(2)).isNotNull();
+        assertThat(cfHandles.get(2).getName()).isEqualTo(col2Name);
+      } finally {
+        for (final ColumnFamilyHandle cfHandle : cfHandles) {
+          cfHandle.close();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void put() throws RocksDBException {
+    try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath());
+         final WriteOptions opt = new WriteOptions(); final ReadOptions optr = new ReadOptions()) {
+      db.put("key1".getBytes(), "value".getBytes());
+      db.put(opt, "key2".getBytes(), "12345678".getBytes());
+      assertThat(db.get("key1".getBytes())).isEqualTo(
+          "value".getBytes());
+      assertThat(db.get("key2".getBytes())).isEqualTo(
+          "12345678".getBytes());
+
+      ByteBuffer key = ByteBuffer.allocateDirect(12);
+      ByteBuffer value = ByteBuffer.allocateDirect(12);
+      key.position(4);
+      key.put("key3".getBytes());
+      key.position(4).limit(8);
+      value.position(4);
+      value.put("val3".getBytes());
+      value.position(4).limit(8);
+
+      db.put(opt, key, value);
+
+      assertThat(key.position()).isEqualTo(8);
+      assertThat(key.limit()).isEqualTo(8);
+
+      assertThat(value.position()).isEqualTo(8);
+      assertThat(value.limit()).isEqualTo(8);
+
+      key.position(4);
+
+      ByteBuffer result = ByteBuffer.allocateDirect(12);
+      assertThat(db.get(optr, key, result)).isEqualTo(4);
+      assertThat(result.position()).isEqualTo(0);
+      assertThat(result.limit()).isEqualTo(4);
+      assertThat(key.position()).isEqualTo(8);
+      assertThat(key.limit()).isEqualTo(8);
+
+      byte[] tmp = new byte[4];
+      result.get(tmp);
+      assertThat(tmp).isEqualTo("val3".getBytes());
+
+      key.position(4);
+
+      result.clear().position(9);
+      assertThat(db.get(optr, key, result)).isEqualTo(4);
+      assertThat(result.position()).isEqualTo(9);
+      assertThat(result.limit()).isEqualTo(12);
+      assertThat(key.position()).isEqualTo(8);
+      assertThat(key.limit()).isEqualTo(8);
+      byte[] tmp2 = new byte[3];
+      result.get(tmp2);
+      assertThat(tmp2).isEqualTo("val".getBytes());
+
+      // put
+      Segment key3 = sliceSegment("key3");
+      Segment key4 = sliceSegment("key4");
+      Segment value0 = sliceSegment("value 0");
+      Segment value1 = sliceSegment("value 1");
+      db.put(key3.data, key3.offset, key3.len, value0.data, value0.offset, value0.len);
+      db.put(opt, key4.data, key4.offset, key4.len, value1.data, value1.offset, value1.len);
+
+      // compare
+      Assert.assertTrue(value0.isSamePayload(db.get(key3.data, key3.offset, key3.len)));
Assert.assertTrue(value1.isSamePayload(db.get(key4.data, key4.offset, key4.len))); + } + } + + private static Segment sliceSegment(String key) { + ByteBuffer rawKey = ByteBuffer.allocate(key.length() + 4); + rawKey.put((byte)0); + rawKey.put((byte)0); + rawKey.put(key.getBytes()); + + return new Segment(rawKey.array(), 2, key.length()); + } + + private static class Segment { + final byte[] data; + final int offset; + final int len; + + public boolean isSamePayload(byte[] value) { + if (value == null) { + return false; + } + if (value.length != len) { + return false; + } + + for (int i = 0; i < value.length; i++) { + if (data[i + offset] != value[i]) { + return false; + } + } + + return true; + } + + public Segment(byte[] value, int offset, int len) { + this.data = value; + this.offset = offset; + this.len = len; + } + } + + @Test + public void write() throws RocksDBException { + try (final StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final Options options = new Options() + .setMergeOperator(stringAppendOperator) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions opts = new WriteOptions()) { + + try (final WriteBatch wb1 = new WriteBatch()) { + wb1.put("key1".getBytes(), "aa".getBytes()); + wb1.merge("key1".getBytes(), "bb".getBytes()); + + try (final WriteBatch wb2 = new WriteBatch()) { + wb2.put("key2".getBytes(), "xx".getBytes()); + wb2.merge("key2".getBytes(), "yy".getBytes()); + db.write(opts, wb1); + db.write(opts, wb2); + } + } + + assertThat(db.get("key1".getBytes())).isEqualTo( + "aa,bb".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo( + "xx,yy".getBytes()); + } + } + + @Test + public void getWithOutValue() throws RocksDBException { + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + byte[] outValue = new byte[5]; + // not found value + int getResult = db.get("keyNotFound".getBytes(), outValue); + assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); + // found value which fits in outValue + getResult = db.get("key1".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("value".getBytes()); + // found value which fits partially + getResult = db.get("key2".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("12345".getBytes()); + } + } + + @Test + public void getWithOutValueReadOptions() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final ReadOptions rOpt = new ReadOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + byte[] outValue = new byte[5]; + // not found value + int getResult = db.get(rOpt, "keyNotFound".getBytes(), + outValue); + assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); + // found value which fits in outValue + getResult = db.get(rOpt, "key1".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("value".getBytes()); + // found value which fits partially + getResult = db.get(rOpt, "key2".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("12345".getBytes()); + } + } + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + 
public void getOutOfArrayMaxSizeValue() throws RocksDBException { + final int numberOfValueSplits = 10; + final int splitSize = Integer.MAX_VALUE / numberOfValueSplits; + + Runtime runtime = Runtime.getRuntime(); + long neededMemory = ((long)(splitSize)) * (((long)numberOfValueSplits) + 3); + boolean isEnoughMemory = runtime.maxMemory() - runtime.totalMemory() > neededMemory; + Assume.assumeTrue(isEnoughMemory); + + final byte[] valueSplit = new byte[splitSize]; + final byte[] key = "key".getBytes(); + + thrown.expect(RocksDBException.class); + thrown.expectMessage("Requested array size exceeds VM limit"); + + // merge (numberOfValueSplits + 1) valueSplit's to get value size exceeding Integer.MAX_VALUE + try (final StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + db.put(key, valueSplit); + for (int i = 0; i < numberOfValueSplits; i++) { + db.merge(key, valueSplit); + } + db.get(key); + } + } + + @Test + public void multiGetAsList() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final ReadOptions rOpt = new ReadOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + List lookupKeys = new ArrayList<>(); + lookupKeys.add("key1".getBytes()); + lookupKeys.add("key2".getBytes()); + List results = db.multiGetAsList(lookupKeys); + assertThat(results).isNotNull(); + assertThat(results).hasSize(lookupKeys.size()); + assertThat(results). + containsExactly("value".getBytes(), "12345678".getBytes()); + // test same method with ReadOptions + results = db.multiGetAsList(rOpt, lookupKeys); + assertThat(results).isNotNull(); + assertThat(results). + contains("value".getBytes(), "12345678".getBytes()); + + // remove existing key + lookupKeys.remove(1); + // add non existing key + lookupKeys.add("key3".getBytes()); + results = db.multiGetAsList(lookupKeys); + assertThat(results).isNotNull(); + assertThat(results). 
+ containsExactly("value".getBytes(), null); + // test same call with readOptions + results = db.multiGetAsList(rOpt, lookupKeys); + assertThat(results).isNotNull(); + assertThat(results).contains("value".getBytes()); + } + } + + @Test + public void merge() throws RocksDBException { + try (final StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(stringAppendOperator); + final WriteOptions wOpt = new WriteOptions(); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()) + ) { + db.put("key1".getBytes(), "value".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value".getBytes()); + // merge key1 with another value portion + db.merge("key1".getBytes(), "value2".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value,value2".getBytes()); + // merge key1 with another value portion + db.merge(wOpt, "key1".getBytes(), "value3".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value,value2,value3".getBytes()); + // merge on non existent key shall insert the value + db.merge(wOpt, "key2".getBytes(), "xxxx".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo( + "xxxx".getBytes()); + + Segment key3 = sliceSegment("key3"); + Segment key4 = sliceSegment("key4"); + Segment value0 = sliceSegment("value 0"); + Segment value1 = sliceSegment("value 1"); + + db.merge(key3.data, key3.offset, key3.len, value0.data, value0.offset, value0.len); + db.merge(wOpt, key4.data, key4.offset, key4.len, value1.data, value1.offset, value1.len); + + // compare + Assert.assertTrue(value0.isSamePayload(db.get(key3.data, key3.offset, key3.len))); + Assert.assertTrue(value1.isSamePayload(db.get(key4.data, key4.offset, key4.len))); + } + } + + @Test + public void delete() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + db.put("key3".getBytes(), "33".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo( + "12345678".getBytes()); + assertThat(db.get("key3".getBytes())).isEqualTo("33".getBytes()); + db.delete("key1".getBytes()); + db.delete(wOpt, "key2".getBytes()); + ByteBuffer key = ByteBuffer.allocateDirect(16); + key.put("key3".getBytes()).flip(); + db.delete(wOpt, key); + assertThat(key.position()).isEqualTo(4); + assertThat(key.limit()).isEqualTo(4); + + assertThat(db.get("key1".getBytes())).isNull(); + assertThat(db.get("key2".getBytes())).isNull(); + + Segment key3 = sliceSegment("key3"); + Segment key4 = sliceSegment("key4"); + db.put("key3".getBytes(), "key3 value".getBytes()); + db.put("key4".getBytes(), "key4 value".getBytes()); + + db.delete(key3.data, key3.offset, key3.len); + db.delete(wOpt, key4.data, key4.offset, key4.len); + + assertThat(db.get("key3".getBytes())).isNull(); + assertThat(db.get("key4".getBytes())).isNull(); + } + } + + @Test + public void singleDelete() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value".getBytes()); + 
assertThat(db.get("key2".getBytes())).isEqualTo( + "12345678".getBytes()); + db.singleDelete("key1".getBytes()); + db.singleDelete(wOpt, "key2".getBytes()); + assertThat(db.get("key1".getBytes())).isNull(); + assertThat(db.get("key2".getBytes())).isNull(); + } + } + + @Test + public void singleDelete_nonExisting() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions()) { + db.singleDelete("key1".getBytes()); + db.singleDelete(wOpt, "key2".getBytes()); + assertThat(db.get("key1".getBytes())).isNull(); + assertThat(db.get("key2".getBytes())).isNull(); + } + } + + @Test + public void deleteRange() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + db.put("key3".getBytes(), "abcdefg".getBytes()); + db.put("key4".getBytes(), "xyz".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes()); + assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes()); + assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes()); + db.deleteRange("key2".getBytes(), "key4".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes()); + assertThat(db.get("key2".getBytes())).isNull(); + assertThat(db.get("key3".getBytes())).isNull(); + assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes()); + } + } + + @Test + public void getIntProperty() throws RocksDBException { + try ( + final Options options = new Options() + .setCreateIfMissing(true) + .setMaxWriteBufferNumber(10) + .setMinWriteBufferNumberToMerge(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions().setDisableWAL(true) + ) { + db.put(wOpt, "key1".getBytes(), "value1".getBytes()); + db.put(wOpt, "key2".getBytes(), "value2".getBytes()); + db.put(wOpt, "key3".getBytes(), "value3".getBytes()); + db.put(wOpt, "key4".getBytes(), "value4".getBytes()); + assertThat(db.getLongProperty("rocksdb.num-entries-active-mem-table")) + .isGreaterThan(0); + assertThat(db.getLongProperty("rocksdb.cur-size-active-mem-table")) + .isGreaterThan(0); + } + } + + @Test + public void fullCompactRange() throws RocksDBException { + try (final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put((String.valueOf(i)).getBytes(), b); + } + db.compactRange(); + } + } + + @Test + public void fullCompactRangeColumnFamily() + throws RocksDBException { + try ( + final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). 
+ setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false) + ) { + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)); + + // open database + final List columnFamilyHandles = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put(columnFamilyHandles.get(1), + String.valueOf(i).getBytes(), b); + } + db.compactRange(columnFamilyHandles.get(1)); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void compactRangeWithKeys() + throws RocksDBException { + try (final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put((String.valueOf(i)).getBytes(), b); + } + db.compactRange("0".getBytes(), "201".getBytes()); + } + } + + @Test + public void compactRangeWithKeysReduce() + throws RocksDBException { + try ( + final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put((String.valueOf(i)).getBytes(), b); + } + db.flush(new FlushOptions().setWaitForFlush(true)); + try (final CompactRangeOptions compactRangeOpts = new CompactRangeOptions() + .setChangeLevel(true) + .setTargetLevel(-1) + .setTargetPathId(0)) { + db.compactRange(null, "0".getBytes(), "201".getBytes(), + compactRangeOpts); + } + } + } + + @Test + public void compactRangeWithKeysColumnFamily() + throws RocksDBException { + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). 
+
+  @Test
+  public void compactRangeWithKeysColumnFamily()
+      throws RocksDBException {
+    try (final DBOptions opt = new DBOptions().
+        setCreateIfMissing(true).
+        setCreateMissingColumnFamilies(true);
+         final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
+             setDisableAutoCompactions(true).
+             setCompactionStyle(CompactionStyle.LEVEL).
+             setNumLevels(4).
+             setWriteBufferSize(100 << 10).
+             setLevelZeroFileNumCompactionTrigger(3).
+             setTargetFileSizeBase(200 << 10).
+             setTargetFileSizeMultiplier(1).
+             setMaxBytesForLevelBase(500 << 10).
+             setMaxBytesForLevelMultiplier(1).
+             setDisableAutoCompactions(false)
+    ) {
+      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+          Arrays.asList(
+              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+          );
+
+      // open database
+      final List<ColumnFamilyHandle> columnFamilyHandles =
+          new ArrayList<>();
+      try (final RocksDB db = RocksDB.open(opt,
+          dbFolder.getRoot().getAbsolutePath(),
+          columnFamilyDescriptors,
+          columnFamilyHandles)) {
+        try {
+          // fill database with key/value pairs
+          byte[] b = new byte[10000];
+          for (int i = 0; i < 200; i++) {
+            rand.nextBytes(b);
+            db.put(columnFamilyHandles.get(1),
+                String.valueOf(i).getBytes(), b);
+          }
+          db.compactRange(columnFamilyHandles.get(1),
+              "0".getBytes(), "201".getBytes());
+        } finally {
+          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+            handle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void compactRangeWithKeysReduceColumnFamily()
+      throws RocksDBException {
+    try (final DBOptions opt = new DBOptions().
+        setCreateIfMissing(true).
+        setCreateMissingColumnFamilies(true);
+         final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
+             setDisableAutoCompactions(true).
+             setCompactionStyle(CompactionStyle.LEVEL).
+             setNumLevels(4).
+             setWriteBufferSize(100 << 10).
+             setLevelZeroFileNumCompactionTrigger(3).
+             setTargetFileSizeBase(200 << 10).
+             setTargetFileSizeMultiplier(1).
+             setMaxBytesForLevelBase(500 << 10).
+             setMaxBytesForLevelMultiplier(1).
+             setDisableAutoCompactions(false)
+    ) {
+      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+          Arrays.asList(
+              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+          );
+
+      final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+      // open database
+      try (final RocksDB db = RocksDB.open(opt,
+          dbFolder.getRoot().getAbsolutePath(),
+          columnFamilyDescriptors,
+          columnFamilyHandles)) {
+        try (final CompactRangeOptions compactRangeOpts = new CompactRangeOptions()
+            .setChangeLevel(true)
+            .setTargetLevel(-1)
+            .setTargetPathId(0)) {
+          // fill database with key/value pairs
+          byte[] b = new byte[10000];
+          for (int i = 0; i < 200; i++) {
+            rand.nextBytes(b);
+            db.put(columnFamilyHandles.get(1),
+                String.valueOf(i).getBytes(), b);
+          }
+          db.compactRange(columnFamilyHandles.get(1), "0".getBytes(),
+              "201".getBytes(), compactRangeOpts);
+        } finally {
+          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+            handle.close();
+          }
+        }
+      }
+    }
+  }
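+
+  // Sizing arithmetic for the test below: each L0 file holds
+  // NUM_KEYS_PER_L0_FILE = 100 entries of KEY_SIZE + VALUE_SIZE = 20 + 300
+  // bytes, i.e. L0_FILE_SIZE = 100 * 320 = 32000 bytes. The write buffer is
+  // twice that, so a manual flush always completes before an automatic flush
+  // would trigger, and the compaction trigger and level sizes are inflated so
+  // that nothing compacts until compactRange() is called explicitly.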
+
+  @Test
+  public void compactRangeToLevel()
+      throws RocksDBException, InterruptedException {
+    final int NUM_KEYS_PER_L0_FILE = 100;
+    final int KEY_SIZE = 20;
+    final int VALUE_SIZE = 300;
+    final int L0_FILE_SIZE =
+        NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+    final int NUM_L0_FILES = 10;
+    final int TEST_SCALE = 5;
+    final int KEY_INTERVAL = 100;
+    try (final Options opt = new Options().
+        setCreateIfMissing(true).
+        setCompactionStyle(CompactionStyle.LEVEL).
+        setNumLevels(5).
+        // a slightly bigger write buffer than the L0 file size,
+        // so that we can ensure a manual flush always goes
+        // before a background flush happens.
+        setWriteBufferSize(L0_FILE_SIZE * 2).
+        // Disable auto L0 -> L1 compaction
+        setLevelZeroFileNumCompactionTrigger(20).
+        setTargetFileSizeBase(L0_FILE_SIZE * 100).
+        setTargetFileSizeMultiplier(1).
+        // To disable auto compaction
+        setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+        setMaxBytesForLevelMultiplier(2).
+        setDisableAutoCompactions(true);
+         final RocksDB db = RocksDB.open(opt,
+             dbFolder.getRoot().getAbsolutePath())
+    ) {
+      // fill database with key/value pairs
+      byte[] value = new byte[VALUE_SIZE];
+      int int_key = 0;
+      for (int round = 0; round < TEST_SCALE; ++round) {
+        int initial_key = int_key;
+        for (int f = 1; f <= NUM_L0_FILES; ++f) {
+          for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+            int_key += KEY_INTERVAL;
+            rand.nextBytes(value);
+
+            db.put(String.format("%020d", int_key).getBytes(),
+                value);
+          }
+          db.flush(new FlushOptions().setWaitForFlush(true));
+          // Make sure each flush creates exactly one more L0 file.
+          assertThat(
+              db.getProperty("rocksdb.num-files-at-level0")).
+              isEqualTo("" + f);
+        }
+
+        // Compact all L0 files we just created
+        db.compactRange(
+            String.format("%020d", initial_key).getBytes(),
+            String.format("%020d", int_key - 1).getBytes());
+        // Make sure there are no L0 files left.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level0")).
+            isEqualTo("0");
+        // Make sure there are some L1 files.
+        // We only check != 0 rather than a specific number,
+        // as we don't want the test to make any assumptions
+        // about how compaction works.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level1")).
+            isNotEqualTo("0");
+        // Because we only compacted the keys issued in this
+        // round, there shouldn't be any L1 -> L2 compaction,
+        // so we expect zero L2 files here.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level2")).
+            isEqualTo("0");
+      }
+    }
+  }
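+
+  // Illustrative helper (our addition, not called by the tests): the level
+  // checks above read RocksDB's "rocksdb.num-files-at-level<N>" property as a
+  // string; parsing it yields the file count as an int.
+  private static int numFilesAtLevel(final RocksDB db, final int level)
+      throws RocksDBException {
+    // getProperty returns the file count as a decimal string, e.g. "3"
+    return Integer.parseInt(
+        db.getProperty("rocksdb.num-files-at-level" + level));
+  }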
+
+  @Test
+  public void deleteFilesInRange() throws RocksDBException, InterruptedException {
+    final int KEY_SIZE = 20;
+    final int VALUE_SIZE = 1000;
+    final int FILE_SIZE = 64000;
+    final int NUM_FILES = 10;
+
+    final int KEY_INTERVAL = 10000;
+    /*
+     * The intention of these options is to end up reliably with 10 files
+     * that we will then delete using deleteFilesInRange.
+     * It writes roughly the number of keys that fit into 10 files (target size).
+     * It writes interleaved so that the files flushed from memory to L0 overlap.
+     * Compaction then cleans everything up, and we should end up with 10 files.
+     */
+    try (final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setCompressionType(CompressionType.NO_COMPRESSION)
+             .setTargetFileSizeBase(FILE_SIZE)
+             .setWriteBufferSize(FILE_SIZE / 2)
+             .setDisableAutoCompactions(true);
+         final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) {
+      int records = FILE_SIZE / (KEY_SIZE + VALUE_SIZE);
+
+      // fill database with key/value pairs
+      byte[] value = new byte[VALUE_SIZE];
+      int key_init = 0;
+      for (int o = 0; o < NUM_FILES; ++o) {
+        int int_key = key_init++;
+        for (int i = 0; i < records; ++i) {
+          int_key += KEY_INTERVAL;
+          rand.nextBytes(value);
+
+          db.put(String.format("%020d", int_key).getBytes(), value);
+        }
+      }
+      db.flush(new FlushOptions().setWaitForFlush(true));
+      db.compactRange();
+      // After the full compaction there should be no files left on L0.
+      assertThat(db.getProperty("rocksdb.num-files-at-level0")).isEqualTo("0");
+
+      // Should be 10, but we are OK with asserting +- 2
+      int files = Integer.parseInt(db.getProperty("rocksdb.num-files-at-level1"));
+      assertThat(files).isBetween(8, 12);
+
+      // Delete the lower 60% (roughly). The result should be 5, but we are OK
+      // with asserting +- 2.
+      // What matters is that we know something was deleted (the JNI call did
+      // something); exact assertions are done in the C++ unit tests.
+      db.deleteFilesInRanges(null,
+          Arrays.asList(null, String.format("%020d", records * KEY_INTERVAL * 6 / 10).getBytes()),
+          false);
+      files = Integer.parseInt(db.getProperty("rocksdb.num-files-at-level1"));
+      assertThat(files).isBetween(3, 7);
+    }
+  }
+
+  @Test
+  public void compactRangeToLevelColumnFamily()
+      throws RocksDBException {
+    final int NUM_KEYS_PER_L0_FILE = 100;
+    final int KEY_SIZE = 20;
+    final int VALUE_SIZE = 300;
+    final int L0_FILE_SIZE =
+        NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+    final int NUM_L0_FILES = 10;
+    final int TEST_SCALE = 5;
+    final int KEY_INTERVAL = 100;
+
+    try (final DBOptions opt = new DBOptions().
+        setCreateIfMissing(true).
+        setCreateMissingColumnFamilies(true);
+         final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions().
+             setCompactionStyle(CompactionStyle.LEVEL).
+             setNumLevels(5).
+             // a slightly bigger write buffer than the L0 file size,
+             // so that we can ensure a manual flush always goes
+             // before a background flush happens.
+             setWriteBufferSize(L0_FILE_SIZE * 2).
+             // Disable auto L0 -> L1 compaction
+             setLevelZeroFileNumCompactionTrigger(20).
+             setTargetFileSizeBase(L0_FILE_SIZE * 100).
+             setTargetFileSizeMultiplier(1).
+             // To disable auto compaction
+             setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+             setMaxBytesForLevelMultiplier(2).
+             setDisableAutoCompactions(true)
+    ) {
+      final List<ColumnFamilyDescriptor> columnFamilyDescriptors =
+          Arrays.asList(
+              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+              new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)
+          );
+
+      final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
+      // open database
+      try (final RocksDB db = RocksDB.open(opt,
+          dbFolder.getRoot().getAbsolutePath(),
+          columnFamilyDescriptors,
+          columnFamilyHandles)) {
+        try {
+          // fill database with key/value pairs
+          byte[] value = new byte[VALUE_SIZE];
+          int int_key = 0;
+          for (int round = 0; round < TEST_SCALE; ++round) {
+            int initial_key = int_key;
+            for (int f = 1; f <= NUM_L0_FILES; ++f) {
+              for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+                int_key += KEY_INTERVAL;
+                rand.nextBytes(value);
+
+                db.put(columnFamilyHandles.get(1),
+                    String.format("%020d", int_key).getBytes(),
+                    value);
+              }
+              db.flush(new FlushOptions().setWaitForFlush(true),
+                  columnFamilyHandles.get(1));
+              // Make sure each flush creates exactly one more L0 file.
+              assertThat(
+                  db.getProperty(columnFamilyHandles.get(1),
+                      "rocksdb.num-files-at-level0")).
+                  isEqualTo("" + f);
+            }
+
+            // Compact all L0 files we just created
+            db.compactRange(
+                columnFamilyHandles.get(1),
+                String.format("%020d", initial_key).getBytes(),
+                String.format("%020d", int_key - 1).getBytes());
+            // Make sure there are no L0 files left.
+            assertThat(
+                db.getProperty(columnFamilyHandles.get(1),
+                    "rocksdb.num-files-at-level0")).
+                isEqualTo("0");
+            // Make sure there are some L1 files.
+            // We only check != 0 rather than a specific number,
+            // as we don't want the test to make any assumptions
+            // about how compaction works.
+            assertThat(
+                db.getProperty(columnFamilyHandles.get(1),
+                    "rocksdb.num-files-at-level1")).
+                isNotEqualTo("0");
+            // Because we only compacted the keys issued in this
+            // round, there shouldn't be any L1 -> L2 compaction,
+            // so we expect zero L2 files here.
+            assertThat(
+                db.getProperty(columnFamilyHandles.get(1),
+                    "rocksdb.num-files-at-level2")).
+                isEqualTo("0");
+          }
+        } finally {
+          for (final ColumnFamilyHandle handle : columnFamilyHandles) {
+            handle.close();
+          }
+        }
+      }
+    }
+  }
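+
+  // cancelAllBackgroundWork(true) waits for any running flushes/compactions
+  // to finish and then permanently stops background work for this instance;
+  // the test below relies on a later manual flush failing with a
+  // RocksDBException as a consequence.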
+ isEqualTo("0"); + } + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void continueBackgroundWorkAfterCancelAllBackgroundWork() throws RocksDBException { + final int KEY_SIZE = 20; + final int VALUE_SIZE = 300; + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions() + ) { + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + final List columnFamilyHandles = new ArrayList<>(); + // open the database + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + try { + db.cancelAllBackgroundWork(true); + try { + db.put(new byte[KEY_SIZE], new byte[VALUE_SIZE]); + db.flush(new FlushOptions().setWaitForFlush(true)); + fail("Expected RocksDBException to be thrown if we attempt to trigger a flush after" + + " all background work is cancelled."); + } catch (RocksDBException ignored) { } + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void cancelAllBackgroundWorkTwice() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + // Cancel all background work synchronously + db.cancelAllBackgroundWork(true); + // Cancel all background work asynchronously + db.cancelAllBackgroundWork(false); + } + } + + @Test + public void pauseContinueBackgroundWork() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + db.pauseBackgroundWork(); + db.continueBackgroundWork(); + db.pauseBackgroundWork(); + db.continueBackgroundWork(); + } + } + + @Test + public void enableDisableFileDeletions() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + db.disableFileDeletions(); + db.enableFileDeletions(false); + db.disableFileDeletions(); + db.enableFileDeletions(true); + } + } + + @Test + public void setOptions() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions() + .setWriteBufferSize(4096)) { + + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)); + + // open database + final List columnFamilyHandles = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) { + try { + final MutableColumnFamilyOptions mutableOptions = + MutableColumnFamilyOptions.builder() + .setWriteBufferSize(2048) + .build(); + + db.setOptions(columnFamilyHandles.get(1), mutableOptions); + + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void destroyDB() throws RocksDBException { + try (final 
Options options = new Options().setCreateIfMissing(true)) { + String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put("key1".getBytes(), "value".getBytes()); + } + assertThat(dbFolder.getRoot().exists() && dbFolder.getRoot().listFiles().length != 0) + .isTrue(); + RocksDB.destroyDB(dbPath, options); + assertThat(dbFolder.getRoot().exists() && dbFolder.getRoot().listFiles().length != 0) + .isFalse(); + } + } + + @Test(expected = RocksDBException.class) + public void destroyDBFailIfOpen() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + // Fails as the db is open and locked. + RocksDB.destroyDB(dbPath, options); + } + } + } + + @Test + public void getApproximateSizes() throws RocksDBException { + final byte key1[] = "key1".getBytes(UTF_8); + final byte key2[] = "key2".getBytes(UTF_8); + final byte key3[] = "key3".getBytes(UTF_8); + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put(key1, key1); + db.put(key2, key2); + db.put(key3, key3); + + final long[] sizes = db.getApproximateSizes( + Arrays.asList( + new Range(new Slice(key1), new Slice(key1)), + new Range(new Slice(key2), new Slice(key3)) + ), + SizeApproximationFlag.INCLUDE_FILES, + SizeApproximationFlag.INCLUDE_MEMTABLES); + + assertThat(sizes.length).isEqualTo(2); + assertThat(sizes[0]).isEqualTo(0); + assertThat(sizes[1]).isGreaterThanOrEqualTo(1); + } + } + } + + @Test + public void getApproximateMemTableStats() throws RocksDBException { + final byte key1[] = "key1".getBytes(UTF_8); + final byte key2[] = "key2".getBytes(UTF_8); + final byte key3[] = "key3".getBytes(UTF_8); + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put(key1, key1); + db.put(key2, key2); + db.put(key3, key3); + + final RocksDB.CountAndSize stats = + db.getApproximateMemTableStats( + new Range(new Slice(key1), new Slice(key3))); + + assertThat(stats).isNotNull(); + assertThat(stats.count).isGreaterThan(1); + assertThat(stats.size).isGreaterThan(1); + } + } + } + + @Test + public void getApproximateMemTableStatsSingleKey() throws RocksDBException { + final byte key1[] = "key1".getBytes(UTF_8); + final byte key2[] = "key2".getBytes(UTF_8); + final byte key3[] = "key3".getBytes(UTF_8); + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.put(key1, key1); + + final RocksDB.CountAndSize stats = + db.getApproximateMemTableStats(new Range(new Slice(key1), new Slice(key3))); + + assertThat(stats).isNotNull(); + assertThat(stats.count).isEqualTo(1); + assertThat(stats.size).isGreaterThan(1); + } + } + } + + @Ignore("TODO(AR) re-enable when ready!") + @Test + public void compactFiles() throws RocksDBException { + final int kTestKeySize = 16; + final int kTestValueSize = 984; + final int kEntrySize = kTestKeySize + kTestValueSize; + final int kEntriesPerBuffer = 100; + final int writeBufferSize = kEntrySize * kEntriesPerBuffer; + final byte[] cfName = "pikachu".getBytes(UTF_8); + + try (final Options 
options = new Options()
+             .setCreateIfMissing(true)
+             .setWriteBufferSize(writeBufferSize)
+             .setCompactionStyle(CompactionStyle.LEVEL)
+             .setTargetFileSizeBase(writeBufferSize)
+             .setMaxBytesForLevelBase(writeBufferSize * 2)
+             .setLevel0StopWritesTrigger(2)
+             .setMaxBytesForLevelMultiplier(2)
+             .setCompressionType(CompressionType.NO_COMPRESSION)
+             .setMaxSubcompactions(4)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath);
+           final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) {
+        db.createColumnFamily(new ColumnFamilyDescriptor(cfName,
+            cfOptions)).close();
+      }
+
+      try (final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions(options)) {
+        final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions),
+            new ColumnFamilyDescriptor(cfName, cfOptions)
+        );
+        final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+        try (final DBOptions dbOptions = new DBOptions(options);
+             final RocksDB db = RocksDB.open(dbOptions, dbPath, cfDescriptors,
+                 cfHandles);
+        ) {
+          try (final FlushOptions flushOptions = new FlushOptions()
+                   .setWaitForFlush(true)
+                   .setAllowWriteStall(true);
+               final CompactionOptions compactionOptions = new CompactionOptions()) {
+            final Random rnd = new Random(301);
+            for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) {
+              final byte[] value = new byte[kTestValueSize];
+              rnd.nextBytes(value);
+              db.put(cfHandles.get(1), Integer.toString(key).getBytes(UTF_8),
+                  value);
+            }
+            db.flush(flushOptions, cfHandles);
+
+            final RocksDB.LiveFiles liveFiles = db.getLiveFiles();
+            final List<String> compactedFiles =
+                db.compactFiles(compactionOptions, cfHandles.get(1),
+                    liveFiles.files, 1, -1, null);
+            assertThat(compactedFiles).isNotEmpty();
+          } finally {
+            for (final ColumnFamilyHandle cfHandle : cfHandles) {
+              cfHandle.close();
+            }
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void enableAutoCompaction() throws RocksDBException {
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)) {
+      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+      );
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+        try {
+          db.enableAutoCompaction(cfHandles);
+        } finally {
+          for (final ColumnFamilyHandle cfHandle : cfHandles) {
+            cfHandle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void numberLevels() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        assertThat(db.numberLevels()).isEqualTo(7);
+      }
+    }
+  }
+
+  @Test
+  public void maxMemCompactionLevel() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        assertThat(db.maxMemCompactionLevel()).isEqualTo(0);
+      }
+    }
+  }
+
+  @Test
+  public void level0StopWriteTrigger() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        assertThat(db.level0StopWriteTrigger()).isEqualTo(36);
+      }
+    }
+  }
+
+  @Test
+  public void getName() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        assertThat(db.getName()).isEqualTo(dbPath);
+      }
+    }
+  }
+
+  @Test
+  public void getEnv() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        assertThat(db.getEnv()).isEqualTo(Env.getDefault());
+      }
+    }
+  }
+
+  @Test
+  public void flush() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath);
+           final FlushOptions flushOptions = new FlushOptions()) {
+        db.flush(flushOptions);
+      }
+    }
+  }
+
+  @Test
+  public void flushWal() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        db.flushWal(true);
+      }
+    }
+  }
+
+  @Test
+  public void syncWal() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        db.syncWal();
+      }
+    }
+  }
+
+  @Test
+  public void getLiveFiles() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        final RocksDB.LiveFiles livefiles = db.getLiveFiles(true);
+        assertThat(livefiles).isNotNull();
+        assertThat(livefiles.manifestFileSize).isEqualTo(59);
+        assertThat(livefiles.files.size()).isEqualTo(3);
+        assertThat(livefiles.files.get(0)).isEqualTo("/CURRENT");
+        assertThat(livefiles.files.get(1)).isEqualTo("/MANIFEST-000005");
+        assertThat(livefiles.files.get(2)).isEqualTo("/OPTIONS-000007");
+      }
+    }
+  }
+
+  @Test
+  public void getSortedWalFiles() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+        final List<LogFile> logFiles = db.getSortedWalFiles();
+        assertThat(logFiles).isNotNull();
+        assertThat(logFiles.size()).isEqualTo(1);
+        assertThat(logFiles.get(0).type())
+            .isEqualTo(WalFileType.kAliveLogFile);
+      }
+    }
+  }
+
+  @Test
+  public void deleteFile() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        db.deleteFile("unknown");
+      }
+    }
+  }
+
+  @Test
+  public void getLiveFilesMetaData() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+        final List<LiveFileMetaData> liveFilesMetaData
+            = db.getLiveFilesMetaData();
+        assertThat(liveFilesMetaData).isEmpty();
+      }
+    }
+  }
+
+  @Test
+  public void getColumnFamilyMetaData() throws RocksDBException {
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)) {
+      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+      );
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+        db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+        try {
+          final ColumnFamilyMetaData cfMetadata =
+              db.getColumnFamilyMetaData(cfHandles.get(0));
+          assertThat(cfMetadata).isNotNull();
+          assertThat(cfMetadata.name()).isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY);
+          assertThat(cfMetadata.levels().size()).isEqualTo(7);
+        } finally {
+          for (final ColumnFamilyHandle cfHandle : cfHandles) {
+            cfHandle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void verifyChecksum() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true)) {
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        db.verifyChecksum();
+      }
+    }
+  }
+
+  @Test
+  public void getPropertiesOfAllTables() throws RocksDBException {
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)) {
+      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+      );
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+        db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+        try {
+          final Map<String, TableProperties> properties =
+              db.getPropertiesOfAllTables(cfHandles.get(0));
+          assertThat(properties).isNotNull();
+        } finally {
+          for (final ColumnFamilyHandle cfHandle : cfHandles) {
+            cfHandle.close();
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void getPropertiesOfTablesInRange() throws RocksDBException {
+    try (final DBOptions options = new DBOptions()
+             .setCreateIfMissing(true)) {
+      final List<ColumnFamilyDescriptor> cfDescs = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)
+      );
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+      final String dbPath = dbFolder.getRoot().getAbsolutePath();
+      try (final RocksDB db = RocksDB.open(options, dbPath, cfDescs, cfHandles)) {
+        db.put(cfHandles.get(0), "key1".getBytes(UTF_8), "value1".getBytes(UTF_8));
+        db.put(cfHandles.get(0), "key2".getBytes(UTF_8), "value2".getBytes(UTF_8));
+        db.put(cfHandles.get(0), "key3".getBytes(UTF_8), "value3".getBytes(UTF_8));
+        try {
+          final Range range = new Range(
+              new Slice("key1".getBytes(UTF_8)),
+              new Slice("key3".getBytes(UTF_8)));
+          final Map<String, TableProperties> properties =
+              db.getPropertiesOfTablesInRange(
+                  cfHandles.get(0), Arrays.asList(range));
+          assertThat(properties).isNotNull();
+        } finally {
+          for (final ColumnFamilyHandle cfHandle : cfHandles) {
+            cfHandle.close();
+          }
+        }
+      }
+    }
+  }
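+
+  // Illustrative sketch (our addition, not called by the tests): the Range
+  // arguments above are built from String keys; our understanding of the JNI
+  // binding is that Slice copies the bytes to the native side, so a small
+  // factory like this is safe to use:
+  private static Range rangeOf(final String from, final String to) {
+    return new Range(new Slice(from.getBytes(UTF_8)),
+        new Slice(to.getBytes(UTF_8)));
+  }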
"value3".getBytes(UTF_8)); + try { + final Range range = db.suggestCompactRange(cfHandles.get(0)); + assertThat(range).isNotNull(); + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } + + @Test + public void promoteL0() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + db.promoteL0(2); + } + } + } + + @Test + public void startTrace() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true)) { + final String dbPath = dbFolder.getRoot().getAbsolutePath(); + try (final RocksDB db = RocksDB.open(options, dbPath)) { + final TraceOptions traceOptions = new TraceOptions(); + + try (final InMemoryTraceWriter traceWriter = new InMemoryTraceWriter()) { + db.startTrace(traceOptions, traceWriter); + + db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + + db.endTrace(); + + final List writes = traceWriter.getWrites(); + assertThat(writes.size()).isGreaterThan(0); + } + } + } + } + + @Test + public void setDBOptions() throws RocksDBException { + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions() + .setWriteBufferSize(4096)) { + + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)); + + // open database + final List columnFamilyHandles = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, columnFamilyHandles)) { + try { + final MutableDBOptions mutableOptions = + MutableDBOptions.builder() + .setBytesPerSync(1024 * 1027 * 7) + .setAvoidFlushDuringShutdown(false) + .build(); + + db.setDBOptions(mutableOptions); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void rocksdbVersion() { + final RocksDB.Version version = RocksDB.rocksdbVersion(); + assertThat(version).isNotNull(); + assertThat(version.getMajor()).isGreaterThan(1); + } + + private static class InMemoryTraceWriter extends AbstractTraceWriter { + private final List writes = new ArrayList<>(); + private volatile boolean closed = false; + + @Override + public void write(final Slice slice) { + if (closed) { + return; + } + final byte[] data = slice.data(); + final byte[] dataCopy = new byte[data.length]; + System.arraycopy(data, 0, dataCopy, 0, data.length); + writes.add(dataCopy); + } + + @Override + public void closeWriter() { + closed = true; + } + + @Override + public long getFileSize() { + long size = 0; + for (int i = 0; i < writes.size(); i++) { + size += writes.get(i).length; + } + return size; + } + + public List getWrites() { + return writes; + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java new file mode 100644 index 000000000..2a13550b7 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java @@ -0,0 +1,289 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class RocksIteratorTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + private void validateByteBufferResult( + final int fetched, final ByteBuffer byteBuffer, final String expected) { + assertThat(fetched).isEqualTo(expected.length()); + assertThat(byteBuffer.position()).isEqualTo(0); + assertThat(byteBuffer.limit()).isEqualTo(Math.min(byteBuffer.remaining(), expected.length())); + final int bufferSpace = byteBuffer.remaining(); + final byte[] contents = new byte[bufferSpace]; + byteBuffer.get(contents, 0, bufferSpace); + assertThat(contents).isEqualTo( + expected.substring(0, bufferSpace).getBytes(StandardCharsets.UTF_8)); + } + + private void validateKey( + final RocksIterator iterator, final ByteBuffer byteBuffer, final String key) { + validateByteBufferResult(iterator.key(byteBuffer), byteBuffer, key); + } + + private void validateValue( + final RocksIterator iterator, final ByteBuffer byteBuffer, final String value) { + validateByteBufferResult(iterator.value(byteBuffer), byteBuffer, value); + } + + @Test + public void rocksIterator() throws RocksDBException { + try (final Options options = + new Options().setCreateIfMissing(true).setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1".getBytes()); + db.put("key2".getBytes(), "value2".getBytes()); + + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + + validateKey(iterator, ByteBuffer.allocateDirect(2), "key1"); + validateKey(iterator, ByteBuffer.allocateDirect(2), "key0"); + validateKey(iterator, ByteBuffer.allocateDirect(4), "key1"); + validateKey(iterator, ByteBuffer.allocateDirect(5), "key1"); + validateValue(iterator, ByteBuffer.allocateDirect(2), "value2"); + validateValue(iterator, ByteBuffer.allocateDirect(2), "vasicu"); + validateValue(iterator, ByteBuffer.allocateDirect(8), "value1"); + + validateKey(iterator, ByteBuffer.allocate(2), "key1"); + validateKey(iterator, ByteBuffer.allocate(2), "key0"); + validateKey(iterator, ByteBuffer.allocate(4), "key1"); + validateKey(iterator, ByteBuffer.allocate(5), "key1"); + validateValue(iterator, ByteBuffer.allocate(2), "value1"); + validateValue(iterator, ByteBuffer.allocate(8), "value1"); + + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + iterator.seekToLast(); + iterator.prev(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + 
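+        // Note on the ByteBuffer variants above: key(buf) and value(buf) copy
+        // at most buf.remaining() bytes but always return the full length of
+        // the key/value, which is why a 2-byte buffer is validated against
+        // expected strings that only have to match on their first two
+        // characters (e.g. "key0", "vasicu").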
iterator.seekToFirst(); + iterator.seekToLast(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + iterator.status(); + + { + final ByteBuffer key = ByteBuffer.allocate(12); + key.put("key1".getBytes()).flip(); + iterator.seek(key); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + assertThat(key.position()).isEqualTo(4); + assertThat(key.limit()).isEqualTo(4); + + validateValue(iterator, ByteBuffer.allocateDirect(12), "value1"); + validateValue(iterator, ByteBuffer.allocateDirect(4), "valu56"); + } + + { + final ByteBuffer key = ByteBuffer.allocate(12); + key.put("key2".getBytes()).flip(); + iterator.seekForPrev(key); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + assertThat(key.position()).isEqualTo(4); + assertThat(key.limit()).isEqualTo(4); + } + + { + final ByteBuffer key = ByteBuffer.allocate(12); + key.put("key1".getBytes()).flip(); + iterator.seek(key); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + assertThat(key.position()).isEqualTo(4); + assertThat(key.limit()).isEqualTo(4); + } + + { + // Check offsets of slice byte buffers + final ByteBuffer key0 = ByteBuffer.allocate(24); + key0.put("key2key2".getBytes()); + final ByteBuffer key = key0.slice(); + key.put("key1".getBytes()).flip(); + iterator.seek(key); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + assertThat(key.position()).isEqualTo(4); + assertThat(key.limit()).isEqualTo(4); + } + + { + // Check offsets of slice byte buffers + final ByteBuffer key0 = ByteBuffer.allocateDirect(24); + key0.put("key2key2".getBytes()); + final ByteBuffer key = key0.slice(); + key.put("key1".getBytes()).flip(); + iterator.seek(key); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + assertThat(key.position()).isEqualTo(4); + assertThat(key.limit()).isEqualTo(4); + } + + { + final ByteBuffer key = ByteBuffer.allocate(12); + key.put("key2".getBytes()).flip(); + iterator.seekForPrev(key); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + assertThat(key.position()).isEqualTo(4); + assertThat(key.limit()).isEqualTo(4); + } + } + } + } + + @Test + public void rocksIteratorSeekAndInsert() throws RocksDBException { + try (final Options options = + new Options().setCreateIfMissing(true).setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1".getBytes()); + db.put("key2".getBytes(), "value2".getBytes()); + + try (final RocksIterator iterator = db.newIterator()) { + iterator.seek("key0".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + + iterator.seek("key1".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + + iterator.seek("key1.5".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + + iterator.seek("key2".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + + iterator.seek("key3".getBytes()); + 
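+        // seek() positions the iterator at the first key >= the target, so
+        // seeking past the last key ("key3" > "key2") leaves it invalid: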
assertThat(iterator.isValid()).isFalse(); + } + + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekForPrev("key0".getBytes()); + assertThat(iterator.isValid()).isFalse(); + + iterator.seekForPrev("key1".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + + iterator.seekForPrev("key1.5".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + + iterator.seekForPrev("key2".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + + iterator.seekForPrev("key3".getBytes()); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + } + + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + + byte[] lastKey; + do { + lastKey = iterator.key(); + iterator.next(); + } while (iterator.isValid()); + + db.put("key3".getBytes(), "value3".getBytes()); + assertThat(iterator.isValid()).isFalse(); + iterator.refresh(); + iterator.seek(lastKey); + assertThat(iterator.isValid()).isTrue(); + + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key3".getBytes()); + } + } + } + + @Test + public void rocksIteratorReleaseAfterCfClose() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + this.dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + + // Test case: release iterator after default CF close + try (final RocksIterator iterator = db.newIterator()) { + // In fact, calling close() on default CF has no effect + db.getDefaultColumnFamily().close(); + + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key".getBytes()); + assertThat(iterator.value()).isEqualTo("value".getBytes()); + } + + // Test case: release iterator after custom CF close + final ColumnFamilyDescriptor cfd1 = new ColumnFamilyDescriptor("cf1".getBytes()); + final ColumnFamilyHandle cfHandle1 = db.createColumnFamily(cfd1); + db.put(cfHandle1, "key1".getBytes(), "value1".getBytes()); + + try (final RocksIterator iterator = db.newIterator(cfHandle1)) { + cfHandle1.close(); + + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + } + + // Test case: release iterator after custom CF drop & close + final ColumnFamilyDescriptor cfd2 = new ColumnFamilyDescriptor("cf2".getBytes()); + final ColumnFamilyHandle cfHandle2 = db.createColumnFamily(cfd2); + db.put(cfHandle2, "key2".getBytes(), "value2".getBytes()); + + try (final RocksIterator iterator = db.newIterator(cfHandle2)) { + db.dropColumnFamily(cfHandle2); + cfHandle2.close(); + + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java new file mode 100644 index 000000000..cce0c61e0 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java @@ -0,0 +1,141 @@ +// 
Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RocksMemEnvTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void memEnvFillAndReopen() throws RocksDBException { + + final byte[][] keys = { + "aaa".getBytes(), + "bbb".getBytes(), + "ccc".getBytes() + }; + + final byte[][] values = { + "foo".getBytes(), + "bar".getBytes(), + "baz".getBytes() + }; + + try (final Env env = new RocksMemEnv(Env.getDefault()); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + final FlushOptions flushOptions = new FlushOptions() + .setWaitForFlush(true); + ) { + try (final RocksDB db = RocksDB.open(options, "/dir/db")) { + // write key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + db.put(keys[i], values[i]); + } + + // read key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } + + // Check iterator access + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + for (int i = 0; i < keys.length; i++) { + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo(keys[i]); + assertThat(iterator.value()).isEqualTo(values[i]); + iterator.next(); + } + // reached end of database + assertThat(iterator.isValid()).isFalse(); + } + + // flush + db.flush(flushOptions); + + // read key/value pairs after flush using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } + } + + options.setCreateIfMissing(false); + + // After reopen the values shall still be in the mem env. + // as long as the env is not freed. + try (final RocksDB db = RocksDB.open(options, "/dir/db")) { + // read key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } + } + } + } + + @Test + public void multipleDatabaseInstances() throws RocksDBException { + // db - keys + final byte[][] keys = { + "aaa".getBytes(), + "bbb".getBytes(), + "ccc".getBytes() + }; + // otherDb - keys + final byte[][] otherKeys = { + "111".getBytes(), + "222".getBytes(), + "333".getBytes() + }; + // values + final byte[][] values = { + "foo".getBytes(), + "bar".getBytes(), + "baz".getBytes() + }; + + try (final Env env = new RocksMemEnv(Env.getDefault()); + final Options options = new Options().setCreateIfMissing(true).setEnv(env); + final RocksDB db = RocksDB.open(options, "/dir/db"); + final RocksDB otherDb = RocksDB.open(options, "/dir/otherDb")) { + // write key/value pairs using MemEnv + // to db and to otherDb. 
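+      // Both databases share one RocksMemEnv but live under distinct
+      // in-memory paths ("/dir/db" vs "/dir/otherDb"), so their key spaces
+      // must stay disjoint; the assertions below verify exactly that.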
+ for (int i = 0; i < keys.length; i++) { + db.put(keys[i], values[i]); + otherDb.put(otherKeys[i], values[i]); + } + + // verify key/value pairs after flush using MemEnv + for (int i = 0; i < keys.length; i++) { + // verify db + assertThat(db.get(otherKeys[i])).isNull(); + assertThat(db.get(keys[i])).isEqualTo(values[i]); + + // verify otherDb + assertThat(otherDb.get(keys[i])).isNull(); + assertThat(otherDb.get(otherKeys[i])).isEqualTo(values[i]); + } + } + } + + @Test(expected = RocksDBException.class) + public void createIfMissingFalse() throws RocksDBException { + try (final Env env = new RocksMemEnv(Env.getDefault()); + final Options options = new Options().setCreateIfMissing(false).setEnv(env); + final RocksDB db = RocksDB.open(options, "/db/dir")) { + // shall throw an exception because db dir does not + // exist. + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java b/src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java new file mode 100644 index 000000000..6116f2f92 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/RocksNativeLibraryResource.java @@ -0,0 +1,18 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.rules.ExternalResource; + +/** + * Resource to load the RocksDB JNI library. + */ +public class RocksNativeLibraryResource extends ExternalResource { + @Override + protected void before() { + RocksDB.loadLibrary(); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SecondaryDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SecondaryDBTest.java new file mode 100644 index 000000000..557d4a47d --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/SecondaryDBTest.java @@ -0,0 +1,135 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class SecondaryDBTest {
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Rule public TemporaryFolder secondaryDbFolder = new TemporaryFolder();
+
+  @Test
+  public void openAsSecondary() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+      db.put("key1".getBytes(), "value1".getBytes());
+      db.put("key2".getBytes(), "value2".getBytes());
+      db.put("key3".getBytes(), "value3".getBytes());
+
+      // open secondary
+      try (final Options secondaryOptions = new Options();
+           final RocksDB secondaryDb =
+               RocksDB.openAsSecondary(secondaryOptions, dbFolder.getRoot().getAbsolutePath(),
+                   secondaryDbFolder.getRoot().getAbsolutePath())) {
+        assertThat(secondaryDb.get("key1".getBytes())).isEqualTo("value1".getBytes());
+        assertThat(secondaryDb.get("key2".getBytes())).isEqualTo("value2".getBytes());
+        assertThat(secondaryDb.get("key3".getBytes())).isEqualTo("value3".getBytes());
+
+        // write to primary
+        db.put("key4".getBytes(), "value4".getBytes());
+        db.put("key5".getBytes(), "value5".getBytes());
+        db.put("key6".getBytes(), "value6".getBytes());
+
+        // tell secondary to catch up
+        secondaryDb.tryCatchUpWithPrimary();
+
+        db.put("key7".getBytes(), "value7".getBytes());
+
+        // check secondary
+        assertThat(secondaryDb.get("key4".getBytes())).isEqualTo("value4".getBytes());
+        assertThat(secondaryDb.get("key5".getBytes())).isEqualTo("value5".getBytes());
+        assertThat(secondaryDb.get("key6".getBytes())).isEqualTo("value6".getBytes());
+
+        assertThat(secondaryDb.get("key7".getBytes())).isNull();
+      }
+    }
+  }
+
+  @Test
+  public void openAsSecondaryColumnFamilies() throws RocksDBException {
+    try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+      cfDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts));
+      cfDescriptors.add(new ColumnFamilyDescriptor("cf1".getBytes(), cfOpts));
+
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+      try (final DBOptions options =
+               new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
+           final RocksDB db = RocksDB.open(
+               options, dbFolder.getRoot().getAbsolutePath(), cfDescriptors, cfHandles)) {
+        try {
+          final ColumnFamilyHandle cf1 = cfHandles.get(1);
+
+          db.put(cf1, "key1".getBytes(), "value1".getBytes());
+          db.put(cf1, "key2".getBytes(), "value2".getBytes());
+          db.put(cf1, "key3".getBytes(), "value3".getBytes());
+
+          final List<ColumnFamilyHandle> secondaryCfHandles = new ArrayList<>();
+
+          // open secondary
+          try (final DBOptions secondaryOptions = new DBOptions();
+               final RocksDB secondaryDb =
+                   RocksDB.openAsSecondary(secondaryOptions, dbFolder.getRoot().getAbsolutePath(),
+                       secondaryDbFolder.getRoot().getAbsolutePath(), cfDescriptors,
+                       secondaryCfHandles)) {
+            try {
+              final ColumnFamilyHandle secondaryCf1 = secondaryCfHandles.get(1);
+
+              assertThat(secondaryDb.get(secondaryCf1, "key1".getBytes()))
+                  .isEqualTo("value1".getBytes());
+              assertThat(secondaryDb.get(secondaryCf1, "key2".getBytes()))
+                  .isEqualTo("value2".getBytes());
+
assertThat(secondaryDb.get(secondaryCf1, "key3".getBytes())) + .isEqualTo("value3".getBytes()); + + // write to primary + db.put(cf1, "key4".getBytes(), "value4".getBytes()); + db.put(cf1, "key5".getBytes(), "value5".getBytes()); + db.put(cf1, "key6".getBytes(), "value6".getBytes()); + + // tell secondary to catch up + secondaryDb.tryCatchUpWithPrimary(); + + db.put(cf1, "key7".getBytes(), "value7".getBytes()); + + // check secondary + assertThat(secondaryDb.get(secondaryCf1, "key4".getBytes())) + .isEqualTo("value4".getBytes()); + assertThat(secondaryDb.get(secondaryCf1, "key5".getBytes())) + .isEqualTo("value5".getBytes()); + assertThat(secondaryDb.get(secondaryCf1, "key6".getBytes())) + .isEqualTo("value6".getBytes()); + + assertThat(secondaryDb.get(secondaryCf1, "key7".getBytes())).isNull(); + + } finally { + for (final ColumnFamilyHandle secondaryCfHandle : secondaryCfHandles) { + secondaryCfHandle.close(); + } + } + } + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + } + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java new file mode 100644 index 000000000..c65b01903 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java @@ -0,0 +1,80 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SliceTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Test + public void slice() { + try (final Slice slice = new Slice("testSlice")) { + assertThat(slice.empty()).isFalse(); + assertThat(slice.size()).isEqualTo(9); + assertThat(slice.data()).isEqualTo("testSlice".getBytes()); + } + + try (final Slice otherSlice = new Slice("otherSlice".getBytes())) { + assertThat(otherSlice.data()).isEqualTo("otherSlice".getBytes()); + } + + try (final Slice thirdSlice = new Slice("otherSlice".getBytes(), 5)) { + assertThat(thirdSlice.data()).isEqualTo("Slice".getBytes()); + } + } + + @Test + public void sliceClear() { + try (final Slice slice = new Slice("abc")) { + assertThat(slice.toString()).isEqualTo("abc"); + slice.clear(); + assertThat(slice.toString()).isEmpty(); + slice.clear(); // make sure we don't double-free + } + } + + @Test + public void sliceRemovePrefix() { + try (final Slice slice = new Slice("abc")) { + assertThat(slice.toString()).isEqualTo("abc"); + slice.removePrefix(1); + assertThat(slice.toString()).isEqualTo("bc"); + } + } + + @Test + public void sliceEquals() { + try (final Slice slice = new Slice("abc"); + final Slice slice2 = new Slice("abc")) { + assertThat(slice.equals(slice2)).isTrue(); + assertThat(slice.hashCode() == slice2.hashCode()).isTrue(); + } + } + + @Test + public void sliceStartWith() { + try (final Slice slice = new Slice("matchpoint"); + final Slice match = new Slice("mat"); + final Slice noMatch = new Slice("nomatch")) { + assertThat(slice.startsWith(match)).isTrue(); + assertThat(slice.startsWith(noMatch)).isFalse(); + } + } + + @Test + public void sliceToString() { + try (final Slice slice = new Slice("stringTest")) { + 
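+      // toString(true) renders the slice contents as hex; the exact output
+      // comes from the native side, so the test only checks it is non-empty.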
assertThat(slice.toString()).isEqualTo("stringTest");
+      assertThat(slice.toString(true)).isNotEqualTo("");
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java
new file mode 100644
index 000000000..11f0d560a
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java
@@ -0,0 +1,169 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class SnapshotTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void snapshots() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      db.put("key".getBytes(), "value".getBytes());
+      // Get a new Snapshot of the database
+      try (final Snapshot snapshot = db.getSnapshot()) {
+        assertThat(snapshot.getSequenceNumber()).isGreaterThan(0);
+        assertThat(snapshot.getSequenceNumber()).isEqualTo(1);
+        try (final ReadOptions readOptions = new ReadOptions()) {
+          // set snapshot in ReadOptions
+          readOptions.setSnapshot(snapshot);
+
+          // retrieve key value pair
+          assertThat(new String(db.get("key".getBytes()))).
+              isEqualTo("value");
+          // retrieve key value pair created before
+          // the snapshot was made
+          assertThat(new String(db.get(readOptions,
+              "key".getBytes()))).isEqualTo("value");
+          // add new key/value pair
+          db.put("newkey".getBytes(), "newvalue".getBytes());
+          // without a snapshot, the latest db entries
+          // are taken into account
+          assertThat(new String(db.get("newkey".getBytes()))).
+              isEqualTo("newvalue");
+          // the snapshot was created before newkey
+          assertThat(db.get(readOptions, "newkey".getBytes())).
+              isNull();
+          // Retrieve snapshot from read options
+          try (final Snapshot sameSnapshot = readOptions.snapshot()) {
+            readOptions.setSnapshot(sameSnapshot);
+            // results must be the same with a new Snapshot
+            // instance using the same native pointer
+            assertThat(new String(db.get(readOptions,
+                "key".getBytes()))).isEqualTo("value");
+            // update key value pair to newvalue
+            db.put("key".getBytes(), "newvalue".getBytes());
+            // a read with the previously created snapshot will
+            // return the previous version of the key value pair
+            assertThat(new String(db.get(readOptions,
+                "key".getBytes()))).isEqualTo("value");
+            // a read for newkey using the snapshot must be
+            // null
+            assertThat(db.get(readOptions, "newkey".getBytes())).
+                isNull();
+            // setting the snapshot to null in ReadOptions leads
+            // to no Snapshot being used.
+ readOptions.setSnapshot(null); + assertThat(new String(db.get(readOptions, + "newkey".getBytes()))).isEqualTo("newvalue"); + // release Snapshot + db.releaseSnapshot(snapshot); + } + } + } + } + } + + @Test + public void iteratorWithSnapshot() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + + // Get new Snapshot of database + // set snapshot in ReadOptions + try (final Snapshot snapshot = db.getSnapshot(); + final ReadOptions readOptions = + new ReadOptions().setSnapshot(snapshot)) { + db.put("key2".getBytes(), "value2".getBytes()); + + // iterate over current state of db + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + } + + // iterate using a snapshot + try (final RocksIterator snapshotIterator = + db.newIterator(readOptions)) { + snapshotIterator.seekToFirst(); + assertThat(snapshotIterator.isValid()).isTrue(); + assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); + snapshotIterator.next(); + assertThat(snapshotIterator.isValid()).isFalse(); + } + + // release Snapshot + db.releaseSnapshot(snapshot); + } + } + } + + @Test + public void iteratorWithSnapshotOnColumnFamily() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + db.put("key".getBytes(), "value".getBytes()); + + // Get new Snapshot of database + // set snapshot in ReadOptions + try (final Snapshot snapshot = db.getSnapshot(); + final ReadOptions readOptions = new ReadOptions() + .setSnapshot(snapshot)) { + db.put("key2".getBytes(), "value2".getBytes()); + + // iterate over current state of column family + try (final RocksIterator iterator = db.newIterator( + db.getDefaultColumnFamily())) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + } + + // iterate using a snapshot on default column family + try (final RocksIterator snapshotIterator = db.newIterator( + db.getDefaultColumnFamily(), readOptions)) { + snapshotIterator.seekToFirst(); + assertThat(snapshotIterator.isValid()).isTrue(); + assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); + snapshotIterator.next(); + assertThat(snapshotIterator.isValid()).isFalse(); + + // release Snapshot + db.releaseSnapshot(snapshot); + } + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java new file mode 100644 index 000000000..2e136e820 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileManagerTest.java @@ -0,0 +1,66 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import java.util.Collections; + +import static org.assertj.core.api.Assertions.*; + +public class SstFileManagerTest { + + @Test + public void maxAllowedSpaceUsage() throws RocksDBException { + try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { + sstFileManager.setMaxAllowedSpaceUsage(1024 * 1024 * 64); + assertThat(sstFileManager.isMaxAllowedSpaceReached()).isFalse(); + assertThat(sstFileManager.isMaxAllowedSpaceReachedIncludingCompactions()).isFalse(); + } + } + + @Test + public void compactionBufferSize() throws RocksDBException { + try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { + sstFileManager.setCompactionBufferSize(1024 * 1024 * 10); + assertThat(sstFileManager.isMaxAllowedSpaceReachedIncludingCompactions()).isFalse(); + } + } + + @Test + public void totalSize() throws RocksDBException { + try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { + assertThat(sstFileManager.getTotalSize()).isEqualTo(0); + } + } + + @Test + public void trackedFiles() throws RocksDBException { + try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { + assertThat(sstFileManager.getTrackedFiles()).isEqualTo(Collections.emptyMap()); + } + } + + @Test + public void deleteRateBytesPerSecond() throws RocksDBException { + try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { + assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(SstFileManager.RATE_BYTES_PER_SEC_DEFAULT); + final long ratePerSecond = 1024 * 1024 * 52; + sstFileManager.setDeleteRateBytesPerSecond(ratePerSecond); + assertThat(sstFileManager.getDeleteRateBytesPerSecond()).isEqualTo(ratePerSecond); + } + } + + @Test + public void maxTrashDBRatio() throws RocksDBException { + try (final SstFileManager sstFileManager = new SstFileManager(Env.getDefault())) { + assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(SstFileManager.MAX_TRASH_DB_RATION_DEFAULT); + final double trashRatio = 0.2; + sstFileManager.setMaxTrashDBRatio(trashRatio); + assertThat(sstFileManager.getMaxTrashDBRatio()).isEqualTo(trashRatio); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java new file mode 100644 index 000000000..e29df99f2 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileReaderTest.java @@ -0,0 +1,222 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
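+// Exercises SstFileReader: iteration, checksum verification, table properties, and ByteBuffer-based seek/seekForPrev over an SST file produced by SstFileWriter.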
+ +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.rocksdb.util.ByteBufferAllocator; + +@RunWith(Parameterized.class) +public class SstFileReaderTest { + private static final String SST_FILE_NAME = "test.sst"; + + static class KeyValueWithOp { + KeyValueWithOp(final String key, final String value, final OpType opType) { + this.key = key; + this.value = value; + this.opType = opType; + } + + String getKey() { + return key; + } + + String getValue() { + return value; + } + + OpType getOpType() { + return opType; + } + + private final String key; + private final String value; + private final OpType opType; + } + + @Rule public TemporaryFolder parentFolder = new TemporaryFolder(); + + @Parameterized.Parameters(name = "{0}") + public static Iterable<Object[]> parameters() { + return Arrays.asList(new Object[][] { + {"direct", ByteBufferAllocator.DIRECT}, {"indirect", ByteBufferAllocator.HEAP}}); + } + + @Parameterized.Parameter(0) public String name; + + @Parameterized.Parameter(1) public ByteBufferAllocator byteBufferAllocator; + + enum OpType { PUT, PUT_BYTES, MERGE, MERGE_BYTES, DELETE, DELETE_BYTES } + + private File newSstFile(final List<KeyValueWithOp> keyValues) + throws IOException, RocksDBException { + final EnvOptions envOptions = new EnvOptions(); + final StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final Options options = new Options().setMergeOperator(stringAppendOperator); + final SstFileWriter sstFileWriter; + sstFileWriter = new SstFileWriter(envOptions, options); + + final File sstFile = parentFolder.newFile(SST_FILE_NAME); + try { + sstFileWriter.open(sstFile.getAbsolutePath()); + for (final KeyValueWithOp keyValue : keyValues) { + final Slice keySlice = new Slice(keyValue.getKey()); + final Slice valueSlice = new Slice(keyValue.getValue()); + final byte[] keyBytes = keyValue.getKey().getBytes(); + final byte[] valueBytes = keyValue.getValue().getBytes(); + switch (keyValue.getOpType()) { + case PUT: + sstFileWriter.put(keySlice, valueSlice); + break; + case PUT_BYTES: + sstFileWriter.put(keyBytes, valueBytes); + break; + case MERGE: + sstFileWriter.merge(keySlice, valueSlice); + break; + case MERGE_BYTES: + sstFileWriter.merge(keyBytes, valueBytes); + break; + case DELETE: + sstFileWriter.delete(keySlice); + break; + case DELETE_BYTES: + sstFileWriter.delete(keyBytes); + break; + default: + fail("Unsupported op type"); + } + keySlice.close(); + valueSlice.close(); + } + sstFileWriter.finish(); + } finally { + assertThat(sstFileWriter).isNotNull(); + sstFileWriter.close(); + options.close(); + envOptions.close(); + } + return sstFile; + } + + @Test + public void readSstFile() throws RocksDBException, IOException { + final List<KeyValueWithOp> keyValues = new ArrayList<>(); + keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key3", "value3", OpType.PUT)); + + final File sstFile = newSstFile(keyValues); + try (final StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final Options options = + new
Options().setCreateIfMissing(true).setMergeOperator(stringAppendOperator); + final SstFileReader reader = new SstFileReader(options)) { + // Open the sst file and iterator + reader.open(sstFile.getAbsolutePath()); + final ReadOptions readOptions = new ReadOptions(); + final SstFileReaderIterator iterator = reader.newIterator(readOptions); + + // Use the iterator to read sst file + iterator.seekToFirst(); + + // Verify Checksum + reader.verifyChecksum(); + + // Verify Table Properties + assertEquals(reader.getTableProperties().getNumEntries(), 3); + + // Check key and value + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + + final ByteBuffer byteBuffer = byteBufferAllocator.allocate(128); + byteBuffer.put("key1".getBytes()).flip(); + iterator.seek(byteBuffer); + assertThat(byteBuffer.position()).isEqualTo(4); + assertThat(byteBuffer.limit()).isEqualTo(4); + + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + + { + byteBuffer.clear(); + assertThat(iterator.key(byteBuffer)).isEqualTo("key1".getBytes().length); + final byte[] dst = new byte["key1".getBytes().length]; + byteBuffer.get(dst); + assertThat(new String(dst)).isEqualTo("key1"); + } + + { + byteBuffer.clear(); + byteBuffer.put("PREFIX".getBytes()); + final ByteBuffer slice = byteBuffer.slice(); + assertThat(iterator.key(byteBuffer)).isEqualTo("key1".getBytes().length); + final byte[] dst = new byte["key1".getBytes().length]; + slice.get(dst); + assertThat(new String(dst)).isEqualTo("key1"); + } + + { + byteBuffer.clear(); + assertThat(iterator.value(byteBuffer)).isEqualTo("value1".getBytes().length); + final byte[] dst = new byte["value1".getBytes().length]; + byteBuffer.get(dst); + assertThat(new String(dst)).isEqualTo("value1"); + } + + byteBuffer.clear(); + byteBuffer.put("key1point5".getBytes()).flip(); + iterator.seek(byteBuffer); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + + byteBuffer.clear(); + byteBuffer.put("key1point5".getBytes()).flip(); + iterator.seekForPrev(byteBuffer); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + + byteBuffer.clear(); + byteBuffer.put("key2point5".getBytes()).flip(); + iterator.seek(byteBuffer); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key3".getBytes()); + assertThat(iterator.value()).isEqualTo("value3".getBytes()); + + byteBuffer.clear(); + byteBuffer.put("key2point5".getBytes()).flip(); + iterator.seekForPrev(byteBuffer); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + + byteBuffer.clear(); + byteBuffer.put("PREFIX".getBytes()); + final ByteBuffer slice = byteBuffer.slice(); + slice.put("key1point5".getBytes()).flip(); + iterator.seekForPrev(slice); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java new file mode 100644 index 000000000..87165bfe1 --- 
/dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/SstFileWriterTest.java @@ -0,0 +1,241 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.BytewiseComparator; + +public class SstFileWriterTest { + private static final String SST_FILE_NAME = "test.sst"; + private static final String DB_DIRECTORY_NAME = "test_db"; + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE + = new RocksNativeLibraryResource(); + + @Rule public TemporaryFolder parentFolder = new TemporaryFolder(); + + enum OpType { PUT, PUT_BYTES, PUT_DIRECT, MERGE, MERGE_BYTES, DELETE, DELETE_BYTES } + + static class KeyValueWithOp { + KeyValueWithOp(String key, String value, OpType opType) { + this.key = key; + this.value = value; + this.opType = opType; + } + + String getKey() { + return key; + } + + String getValue() { + return value; + } + + OpType getOpType() { + return opType; + } + + private final String key; + private final String value; + private final OpType opType; + }; + + private File newSstFile(final List<KeyValueWithOp> keyValues, + boolean useJavaBytewiseComparator) throws IOException, RocksDBException { + final EnvOptions envOptions = new EnvOptions(); + final StringAppendOperator stringAppendOperator = new StringAppendOperator(); + final Options options = new Options().setMergeOperator(stringAppendOperator); + SstFileWriter sstFileWriter = null; + ComparatorOptions comparatorOptions = null; + BytewiseComparator comparator = null; + if (useJavaBytewiseComparator) { + comparatorOptions = new ComparatorOptions().setUseDirectBuffer(false); + comparator = new BytewiseComparator(comparatorOptions); + options.setComparator(comparator); + sstFileWriter = new SstFileWriter(envOptions, options); + } else { + sstFileWriter = new SstFileWriter(envOptions, options); + } + + final File sstFile = parentFolder.newFile(SST_FILE_NAME); + try { + sstFileWriter.open(sstFile.getAbsolutePath()); + assertThat(sstFileWriter.fileSize()).isEqualTo(0); + for (KeyValueWithOp keyValue : keyValues) { + Slice keySlice = new Slice(keyValue.getKey()); + Slice valueSlice = new Slice(keyValue.getValue()); + byte[] keyBytes = keyValue.getKey().getBytes(); + byte[] valueBytes = keyValue.getValue().getBytes(); + ByteBuffer keyDirect = ByteBuffer.allocateDirect(keyBytes.length); + keyDirect.put(keyBytes); + keyDirect.flip(); + ByteBuffer valueDirect = ByteBuffer.allocateDirect(valueBytes.length); + valueDirect.put(valueBytes); + valueDirect.flip(); + switch (keyValue.getOpType()) { + case PUT: + sstFileWriter.put(keySlice, valueSlice); + break; + case PUT_BYTES: + sstFileWriter.put(keyBytes, valueBytes); + break; + case PUT_DIRECT: + sstFileWriter.put(keyDirect, valueDirect); + assertThat(keyDirect.position()).isEqualTo(keyBytes.length); + assertThat(keyDirect.limit()).isEqualTo(keyBytes.length); + assertThat(valueDirect.position()).isEqualTo(valueBytes.length); +
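            // put(ByteBuffer, ByteBuffer) consumes the direct buffers: afterwards position == limit +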
assertThat(valueDirect.limit()).isEqualTo(valueBytes.length); + break; + case MERGE: + sstFileWriter.merge(keySlice, valueSlice); + break; + case MERGE_BYTES: + sstFileWriter.merge(keyBytes, valueBytes); + break; + case DELETE: + sstFileWriter.delete(keySlice); + break; + case DELETE_BYTES: + sstFileWriter.delete(keyBytes); + break; + default: + fail("Unsupported op type"); + } + keySlice.close(); + valueSlice.close(); + } + sstFileWriter.finish(); + assertThat(sstFileWriter.fileSize()).isGreaterThan(100); + } finally { + assertThat(sstFileWriter).isNotNull(); + sstFileWriter.close(); + options.close(); + envOptions.close(); + if (comparatorOptions != null) { + comparatorOptions.close(); + } + if (comparator != null) { + comparator.close(); + } + } + return sstFile; + } + + @Test + public void generateSstFileWithJavaComparator() + throws RocksDBException, IOException { + final List<KeyValueWithOp> keyValues = new ArrayList<>(); + keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE)); + keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE)); + keyValues.add(new KeyValueWithOp("key5", "", OpType.DELETE)); + + newSstFile(keyValues, true); + } + + @Test + public void generateSstFileWithNativeComparator() + throws RocksDBException, IOException { + final List<KeyValueWithOp> keyValues = new ArrayList<>(); + keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE)); + keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE)); + keyValues.add(new KeyValueWithOp("key5", "", OpType.DELETE)); + + newSstFile(keyValues, false); + } + + @Test + public void ingestSstFile() throws RocksDBException, IOException { + final List<KeyValueWithOp> keyValues = new ArrayList<>(); + keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT_DIRECT)); + keyValues.add(new KeyValueWithOp("key3", "value3", OpType.PUT_BYTES)); + keyValues.add(new KeyValueWithOp("key4", "value4", OpType.MERGE)); + keyValues.add(new KeyValueWithOp("key5", "value5", OpType.MERGE_BYTES)); + keyValues.add(new KeyValueWithOp("key6", "", OpType.DELETE)); + keyValues.add(new KeyValueWithOp("key7", "", OpType.DELETE)); + + + final File sstFile = newSstFile(keyValues, false); + final File dbFolder = parentFolder.newFolder(DB_DIRECTORY_NAME); + try(final StringAppendOperator stringAppendOperator = + new StringAppendOperator(); + final Options options = new Options() + .setCreateIfMissing(true) + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(options, dbFolder.getAbsolutePath()); + final IngestExternalFileOptions ingestExternalFileOptions = + new IngestExternalFileOptions()) { + db.ingestExternalFile(Arrays.asList(sstFile.getAbsolutePath()), + ingestExternalFileOptions); + + assertThat(db.get("key1".getBytes())).isEqualTo("value1".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo("value2".getBytes()); + assertThat(db.get("key3".getBytes())).isEqualTo("value3".getBytes()); + assertThat(db.get("key4".getBytes())).isEqualTo("value4".getBytes()); + assertThat(db.get("key5".getBytes())).isEqualTo("value5".getBytes()); + assertThat(db.get("key6".getBytes())).isEqualTo(null); + assertThat(db.get("key7".getBytes())).isEqualTo(null); + } + } + + @Test + public void ingestSstFile_cf() throws
RocksDBException, IOException { + final List<KeyValueWithOp> keyValues = new ArrayList<>(); + keyValues.add(new KeyValueWithOp("key1", "value1", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key2", "value2", OpType.PUT)); + keyValues.add(new KeyValueWithOp("key3", "value3", OpType.MERGE)); + keyValues.add(new KeyValueWithOp("key4", "", OpType.DELETE)); + + final File sstFile = newSstFile(keyValues, false); + final File dbFolder = parentFolder.newFolder(DB_DIRECTORY_NAME); + try(final StringAppendOperator stringAppendOperator = + new StringAppendOperator(); + final Options options = new Options() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true) + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(options, dbFolder.getAbsolutePath()); + final IngestExternalFileOptions ingestExternalFileOptions = + new IngestExternalFileOptions()) { + + try(final ColumnFamilyOptions cf_opts = new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator); + final ColumnFamilyHandle cf_handle = db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes(), cf_opts))) { + + db.ingestExternalFile(cf_handle, + Arrays.asList(sstFile.getAbsolutePath()), + ingestExternalFileOptions); + + assertThat(db.get(cf_handle, + "key1".getBytes())).isEqualTo("value1".getBytes()); + assertThat(db.get(cf_handle, + "key2".getBytes())).isEqualTo("value2".getBytes()); + assertThat(db.get(cf_handle, + "key3".getBytes())).isEqualTo("value3".getBytes()); + assertThat(db.get(cf_handle, + "key4".getBytes())).isEqualTo(null); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/SstPartitionerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/SstPartitionerTest.java new file mode 100644 index 000000000..74816db93 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/SstPartitionerTest.java @@ -0,0 +1,72 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
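+// The fixed-prefix partitioner below starts a new compaction output file whenever the first four key bytes change, so keys prefixed "aaaa" and "bbbb" end up in two SST files.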
+ + package org.rocksdb; + + import static java.nio.charset.StandardCharsets.UTF_8; + import static org.assertj.core.api.Assertions.assertThat; + + import java.util.List; + import org.junit.ClassRule; + import org.junit.Rule; + import org.junit.Test; + import org.junit.rules.TemporaryFolder; + + public class SstPartitionerTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void sstFixedPrefix() throws RocksDBException { + try (SstPartitionerFixedPrefixFactory factory = new SstPartitionerFixedPrefixFactory(4); + final Options opt = + new Options().setCreateIfMissing(true).setSstPartitionerFactory(factory); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + // write keys under two distinct 4-byte prefixes + db.put("aaaa1".getBytes(), "A".getBytes()); + db.put("bbbb1".getBytes(), "B".getBytes()); + db.flush(new FlushOptions()); + + db.put("aaaa0".getBytes(), "A2".getBytes()); + db.put("aaaa2".getBytes(), "A2".getBytes()); + db.flush(new FlushOptions()); + + db.compactRange(); + + List<LiveFileMetaData> metadata = db.getLiveFilesMetaData(); + assertThat(metadata.size()).isEqualTo(2); + } + } + + @Test + public void sstFixedPrefixFamily() throws RocksDBException { + final byte[] cfName = "new_cf".getBytes(UTF_8); + final ColumnFamilyDescriptor cfDescriptor = new ColumnFamilyDescriptor(cfName, + new ColumnFamilyOptions().setSstPartitionerFactory( + new SstPartitionerFixedPrefixFactory(4))); + + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath())) { + final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(cfDescriptor); + + // write keys under two distinct 4-byte prefixes + db.put(columnFamilyHandle, "aaaa1".getBytes(), "A".getBytes()); + db.put(columnFamilyHandle, "bbbb1".getBytes(), "B".getBytes()); + db.flush(new FlushOptions(), columnFamilyHandle); + + db.put(columnFamilyHandle, "aaaa0".getBytes(), "A2".getBytes()); + db.put(columnFamilyHandle, "aaaa2".getBytes(), "A2".getBytes()); + db.flush(new FlushOptions(), columnFamilyHandle); + + db.compactRange(columnFamilyHandle); + + List<LiveFileMetaData> metadata = db.getLiveFilesMetaData(); + assertThat(metadata.size()).isEqualTo(2); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java new file mode 100644 index 000000000..36721c80d --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java @@ -0,0 +1,55 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
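+// StatisticsCollector polls the registered Statistics objects on a fixed interval (100ms below) and forwards ticker and histogram values to the supplied callback.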
+ +package org.rocksdb; + +import java.util.Collections; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StatisticsCollectorTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void statisticsCollector() + throws InterruptedException, RocksDBException { + try (final Statistics statistics = new Statistics(); + final Options opt = new Options() + .setStatistics(statistics) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + try(final Statistics stats = opt.statistics()) { + + final StatsCallbackMock callback = new StatsCallbackMock(); + final StatsCollectorInput statsInput = + new StatsCollectorInput(stats, callback); + + final StatisticsCollector statsCollector = new StatisticsCollector( + Collections.singletonList(statsInput), 100); + statsCollector.start(); + + Thread.sleep(1000); + + assertThat(callback.tickerCallbackCount).isGreaterThan(0); + assertThat(callback.histCallbackCount).isGreaterThan(0); + + statsCollector.shutDown(1000); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java new file mode 100644 index 000000000..de92102ec --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/StatisticsTest.java @@ -0,0 +1,168 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
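+// Statistics must be attached to the Options before the database is opened; tickers accumulate counters such as BYTES_READ, while histograms record distributions such as BYTES_PER_READ.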
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.nio.charset.StandardCharsets; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StatisticsTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void statsLevel() throws RocksDBException { + final Statistics statistics = new Statistics(); + statistics.setStatsLevel(StatsLevel.ALL); + assertThat(statistics.statsLevel()).isEqualTo(StatsLevel.ALL); + } + + @Test + public void getTickerCount() throws RocksDBException { + try (final Statistics statistics = new Statistics(); + final Options opt = new Options() + .setStatistics(statistics) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + db.put(key, value); + for(int i = 0; i < 10; i++) { + db.get(key); + } + + assertThat(statistics.getTickerCount(TickerType.BYTES_READ)).isGreaterThan(0); + } + } + + @Test + public void getAndResetTickerCount() throws RocksDBException { + try (final Statistics statistics = new Statistics(); + final Options opt = new Options() + .setStatistics(statistics) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + db.put(key, value); + for(int i = 0; i < 10; i++) { + db.get(key); + } + + final long read = statistics.getAndResetTickerCount(TickerType.BYTES_READ); + assertThat(read).isGreaterThan(0); + + final long readAfterReset = statistics.getTickerCount(TickerType.BYTES_READ); + assertThat(readAfterReset).isLessThan(read); + } + } + + @Test + public void getHistogramData() throws RocksDBException { + try (final Statistics statistics = new Statistics(); + final Options opt = new Options() + .setStatistics(statistics) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + db.put(key, value); + for(int i = 0; i < 10; i++) { + db.get(key); + } + + final HistogramData histogramData = statistics.getHistogramData(HistogramType.BYTES_PER_READ); + assertThat(histogramData).isNotNull(); + assertThat(histogramData.getAverage()).isGreaterThan(0); + assertThat(histogramData.getMedian()).isGreaterThan(0); + assertThat(histogramData.getPercentile95()).isGreaterThan(0); + assertThat(histogramData.getPercentile99()).isGreaterThan(0); + assertThat(histogramData.getStandardDeviation()).isEqualTo(0.00); + assertThat(histogramData.getMax()).isGreaterThan(0); + assertThat(histogramData.getCount()).isGreaterThan(0); + assertThat(histogramData.getSum()).isGreaterThan(0); + assertThat(histogramData.getMin()).isGreaterThan(0); + } + } + + @Test + public void getHistogramString() throws RocksDBException { + try (final Statistics statistics = new Statistics(); + final Options opt = new Options() + .setStatistics(statistics) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + 
dbFolder.getRoot().getAbsolutePath())) { + + final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + for(int i = 0; i < 10; i++) { + db.put(key, value); + } + + assertThat(statistics.getHistogramString(HistogramType.BYTES_PER_WRITE)).isNotNull(); + } + } + + @Test + public void reset() throws RocksDBException { + try (final Statistics statistics = new Statistics(); + final Options opt = new Options() + .setStatistics(statistics) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] key = "some-key".getBytes(StandardCharsets.UTF_8); + final byte[] value = "some-value".getBytes(StandardCharsets.UTF_8); + + db.put(key, value); + for(int i = 0; i < 10; i++) { + db.get(key); + } + + final long read = statistics.getTickerCount(TickerType.BYTES_READ); + assertThat(read).isGreaterThan(0); + + statistics.reset(); + + final long readAfterReset = statistics.getTickerCount(TickerType.BYTES_READ); + assertThat(readAfterReset).isLessThan(read); + } + } + + @Test + public void ToString() throws RocksDBException { + try (final Statistics statistics = new Statistics(); + final Options opt = new Options() + .setStatistics(statistics) + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(statistics.toString()).isNotNull(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java b/src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java new file mode 100644 index 000000000..af8db0caa --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +public class StatsCallbackMock implements StatisticsCollectorCallback { + public int tickerCallbackCount = 0; + public int histCallbackCount = 0; + + public void tickerCallback(TickerType tickerType, long tickerCount) { + tickerCallbackCount++; + } + + public void histogramCallback(HistogramType histType, + HistogramData histData) { + histCallbackCount++; + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java new file mode 100644 index 000000000..2bd3b1798 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TableFilterTest.java @@ -0,0 +1,106 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +public class TableFilterTest { + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void readOptions() throws RocksDBException { + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). 
+ setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions() + ) { + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); + + // open database + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + + try (final CfNameCollectionTableFilter cfNameCollectingTableFilter = + new CfNameCollectionTableFilter(); + final FlushOptions flushOptions = + new FlushOptions().setWaitForFlush(true); + final ReadOptions readOptions = + new ReadOptions().setTableFilter(cfNameCollectingTableFilter)) { + + db.put(columnFamilyHandles.get(0), + "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(0), + "key2".getBytes(UTF_8), "value2".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(0), + "key3".getBytes(UTF_8), "value3".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(1), + "key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(1), + "key2".getBytes(UTF_8), "value2".getBytes(UTF_8)); + db.put(columnFamilyHandles.get(1), + "key3".getBytes(UTF_8), "value3".getBytes(UTF_8)); + + db.flush(flushOptions, columnFamilyHandles); + + try (final RocksIterator iterator = + db.newIterator(columnFamilyHandles.get(0), readOptions)) { + iterator.seekToFirst(); + while (iterator.isValid()) { + iterator.key(); + iterator.value(); + iterator.next(); + } + } + + try (final RocksIterator iterator = + db.newIterator(columnFamilyHandles.get(1), readOptions)) { + iterator.seekToFirst(); + while (iterator.isValid()) { + iterator.key(); + iterator.value(); + iterator.next(); + } + } + + assertThat(cfNameCollectingTableFilter.cfNames.size()).isEqualTo(2); + assertThat(cfNameCollectingTableFilter.cfNames.get(0)) + .isEqualTo(RocksDB.DEFAULT_COLUMN_FAMILY); + assertThat(cfNameCollectingTableFilter.cfNames.get(1)) + .isEqualTo("new_cf".getBytes(UTF_8)); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + columnFamilyHandle.close(); + } + } + } + } + + private static class CfNameCollectionTableFilter extends AbstractTableFilter { + private final List<byte[]> cfNames = new ArrayList<>(); + + @Override + public boolean filter(final TableProperties tableProperties) { + cfNames.add(tableProperties.getColumnFamilyName()); + return true; + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java new file mode 100644 index 000000000..c958f96b2 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TimedEnvTest.java @@ -0,0 +1,43 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
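+// TimedEnv wraps another Env and measures the time spent in each file-system operation; it is otherwise a drop-in replacement for the wrapped Env.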
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static java.nio.charset.StandardCharsets.UTF_8; + +public class TimedEnvTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void construct() throws RocksDBException { + try (final Env env = new TimedEnv(Env.getDefault())) { + // no-op + } + } + + @Test + public void construct_integration() throws RocksDBException { + try (final Env env = new TimedEnv(Env.getDefault()); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + ) { + try (final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getPath())) { + db.put("key1".getBytes(UTF_8), "value1".getBytes(UTF_8)); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java new file mode 100644 index 000000000..7eaa6b16c --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBOptionsTest.java @@ -0,0 +1,64 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TransactionDBOptionsTest { + + private static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void maxNumLocks() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxNumLocks(longValue); + assertThat(opt.getMaxNumLocks()).isEqualTo(longValue); + } + } + + @Test + public void maxNumStripes() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setNumStripes(longValue); + assertThat(opt.getNumStripes()).isEqualTo(longValue); + } + } + + @Test + public void transactionLockTimeout() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setTransactionLockTimeout(longValue); + assertThat(opt.getTransactionLockTimeout()).isEqualTo(longValue); + } + } + + @Test + public void defaultLockTimeout() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final long longValue = rand.nextLong(); + opt.setDefaultLockTimeout(longValue); + assertThat(opt.getDefaultLockTimeout()).isEqualTo(longValue); + } + } + + @Test + public void writePolicy() { + try (final TransactionDBOptions opt = new TransactionDBOptions()) { + final TxnDBWritePolicy writePolicy = TxnDBWritePolicy.WRITE_UNPREPARED; // non-default + opt.setWritePolicy(writePolicy); + assertThat(opt.getWritePolicy()).isEqualTo(writePolicy); + } + } + +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java new file mode 100644 index 000000000..b0ea813ff --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionDBTest.java @@ -0,0 +1,178 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.*; + +import static org.assertj.core.api.Assertions.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; + +public class TransactionDBTest { + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void open() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(tdb).isNotNull(); + } + } + + @Test + public void open_columnFamilies() throws RocksDBException { + try(final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions myCfOpts = new ColumnFamilyOptions()) { + + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("myCf".getBytes(), myCfOpts)); + + final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); + + try (final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(dbOptions, txnDbOptions, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles)) { + try { + assertThat(tdb).isNotNull(); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void beginTransaction() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions()) { + + try(final Transaction txn = tdb.beginTransaction(writeOptions)) { + assertThat(txn).isNotNull(); + } + } + } + + @Test + public void beginTransaction_transactionOptions() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions(); + final TransactionOptions txnOptions = new TransactionOptions()) { + + try(final Transaction txn = tdb.beginTransaction(writeOptions, + txnOptions)) { + assertThat(txn).isNotNull(); + } + } + } + + @Test + public void beginTransaction_withOld() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions()) { + + try(final Transaction txn = tdb.beginTransaction(writeOptions)) { + final Transaction txnReused = tdb.beginTransaction(writeOptions, txn); + assertThat(txnReused).isSameAs(txn); + } + } + } + + @Test + public void beginTransaction_withOld_transactionOptions() + throws
RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions(); + final TransactionOptions txnOptions = new TransactionOptions()) { + + try(final Transaction txn = tdb.beginTransaction(writeOptions)) { + final Transaction txnReused = tdb.beginTransaction(writeOptions, + txnOptions, txn); + assertThat(txnReused).isSameAs(txn); + } + } + } + + @Test + public void lockStatusData() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions writeOptions = new WriteOptions(); + final ReadOptions readOptions = new ReadOptions()) { + + try (final Transaction txn = tdb.beginTransaction(writeOptions)) { + + final byte key[] = "key".getBytes(UTF_8); + final byte value[] = "value".getBytes(UTF_8); + + txn.put(key, value); + assertThat(txn.getForUpdate(readOptions, key, true)).isEqualTo(value); + + final Map<Long, TransactionDB.KeyLockInfo> lockStatus = + tdb.getLockStatusData(); + + assertThat(lockStatus.size()).isEqualTo(1); + final Set<Map.Entry<Long, TransactionDB.KeyLockInfo>> entrySet = lockStatus.entrySet(); + final Map.Entry<Long, TransactionDB.KeyLockInfo> entry = entrySet.iterator().next(); + final long columnFamilyId = entry.getKey(); + assertThat(columnFamilyId).isEqualTo(0); + final TransactionDB.KeyLockInfo keyLockInfo = entry.getValue(); + assertThat(keyLockInfo.getKey()).isEqualTo(new String(key, UTF_8)); + assertThat(keyLockInfo.getTransactionIDs().length).isEqualTo(1); + assertThat(keyLockInfo.getTransactionIDs()[0]).isEqualTo(txn.getId()); + assertThat(keyLockInfo.isExclusive()).isTrue(); + } + } + } + + @Test + public void deadlockInfoBuffer() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath())) { + + // TODO(AR) can we cause a deadlock so that we can test the output here? + assertThat(tdb.getDeadlockInfoBuffer()).isEmpty(); + } + } + + @Test + public void setDeadlockInfoBufferSize() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final TransactionDB tdb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath())) { + tdb.setDeadlockInfoBufferSize(123); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java new file mode 100644 index 000000000..3c4dff7bb --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java @@ -0,0 +1,139 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
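+// getUpdatesSince only yields data while the WAL is retained, hence the setWalTtlSeconds/setWalSizeLimitMB settings used by the tests below.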
+package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TransactionLogIteratorTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void transactionLogIterator() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + //no-op + } + } + + @Test + public void getBatch() throws RocksDBException { + final int numberOfPuts = 5; + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + for (int i = 0; i < numberOfPuts; i++) { + db.put(String.valueOf(i).getBytes(), + String.valueOf(i).getBytes()); + } + db.flush(new FlushOptions().setWaitForFlush(true)); + + // the latest sequence number is 5 because 5 puts + // were written beforehand + assertThat(db.getLatestSequenceNumber()). + isEqualTo(numberOfPuts); + + // insert 5 writes into a cf + try (final ColumnFamilyHandle cfHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes()))) { + for (int i = 0; i < numberOfPuts; i++) { + db.put(cfHandle, String.valueOf(i).getBytes(), + String.valueOf(i).getBytes()); + } + // the latest sequence number is 10 because + // (5 + 5) puts were written beforehand + assertThat(db.getLatestSequenceNumber()). 
+ isEqualTo(numberOfPuts + numberOfPuts); + + // Get updates since the beginning + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.status(); + + // The first sequence number is 1 + final TransactionLogIterator.BatchResult batchResult = + transactionLogIterator.getBatch(); + assertThat(batchResult.sequenceNumber()).isEqualTo(1); + } + } + } + } + + @Test + public void transactionLogIteratorStallAtLastRecord() + throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + db.put("key1".getBytes(), "value1".getBytes()); + // Get updates since the beginning + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.next(); + assertThat(transactionLogIterator.isValid()).isFalse(); + transactionLogIterator.status(); + db.put("key2".getBytes(), "value2".getBytes()); + transactionLogIterator.next(); + transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); + } + } + } + + @Test + public void transactionLogIteratorCheckAfterRestart() + throws RocksDBException { + final int numberOfKeys = 2; + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10)) { + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1".getBytes()); + db.put("key2".getBytes(), "value2".getBytes()); + db.flush(new FlushOptions().setWaitForFlush(true)); + + } + + // reopen + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys); + + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + for (int i = 0; i < numberOfKeys; i++) { + transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.next(); + } + } + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java new file mode 100644 index 000000000..add0439e0 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionOptionsTest.java @@ -0,0 +1,72 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TransactionOptionsTest { + + private static final Random rand = PlatformRandomHelper. 
+ getPlatformSpecificRandomFactory(); + + @Test + public void snapshot() { + try (final TransactionOptions opt = new TransactionOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setSetSnapshot(boolValue); + assertThat(opt.isSetSnapshot()).isEqualTo(boolValue); + } + } + + @Test + public void deadlockDetect() { + try (final TransactionOptions opt = new TransactionOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDeadlockDetect(boolValue); + assertThat(opt.isDeadlockDetect()).isEqualTo(boolValue); + } + } + + @Test + public void lockTimeout() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setLockTimeout(longValue); + assertThat(opt.getLockTimeout()).isEqualTo(longValue); + } + } + + @Test + public void expiration() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setExpiration(longValue); + assertThat(opt.getExpiration()).isEqualTo(longValue); + } + } + + @Test + public void deadlockDetectDepth() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setDeadlockDetectDepth(longValue); + assertThat(opt.getDeadlockDetectDepth()).isEqualTo(longValue); + } + } + + @Test + public void maxWriteBatchSize() { + try (final TransactionOptions opt = new TransactionOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxWriteBatchSize(longValue); + assertThat(opt.getMaxWriteBatchSize()).isEqualTo(longValue); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java new file mode 100644 index 000000000..8a3067de9 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TransactionTest.java @@ -0,0 +1,488 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class TransactionTest extends AbstractTransactionTest { + + @Test + public void getForUpdate_cf_conflict() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, k1, v1); + assertThat(txn.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, testCf, k1, true)).isEqualTo(v1); + + // NOTE: txn2 updates k1, during txn3 + try { + txn2.put(testCf, k1, v12); // should cause an exception! 
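+          // not reached when the conflict is detected: txn3's getForUpdate still holds the lock on k1, so the put above times out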
} catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + " transactions"); + } + } + + @Test + public void prepare_commit() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.commit(); + } + + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.setName("txnPrepare1"); + txn.put(k1, v12); + txn.prepare(); + txn.commit(); + } + + try (final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isEqualTo(v12); + } + } + } + + @Test + public void prepare_rollback() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.commit(); + } + + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.setName("txnPrepare1"); + txn.put(k1, v12); + txn.prepare(); + txn.rollback(); + } + + try (final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + } + } + + @Test + public void prepare_read_prepared_commit() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.commit(); + } + + Transaction txnPrepare; + txnPrepare = dbContainer.beginTransaction(); + txnPrepare.setName("txnPrepare1"); + txnPrepare.put(k1, v12); + txnPrepare.prepare(); + + try (final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + + txnPrepare.commit(); + + try (final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isEqualTo(v12); + } + } + } + + @Test + public void prepare_read_prepared_rollback() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 = "value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + txn.commit(); + } + + Transaction txnPrepare; + txnPrepare = dbContainer.beginTransaction(); + txnPrepare.setName("txnPrepare1"); + txnPrepare.put(k1, v12); + txnPrepare.prepare(); + + try (final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + + txnPrepare.rollback(); + + try (final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.get(readOptions, k1)).isEqualTo(v1); + } + } + } + + @Test + public void getForUpdate_conflict() throws RocksDBException { + final byte[] k1 = "key1".getBytes(UTF_8); + final byte[] v1 =
"value1".getBytes(UTF_8); + final byte[] v12 = "value12".getBytes(UTF_8); + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(k1, v1); + assertThat(txn.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.getForUpdate(readOptions, k1, true)).isEqualTo(v1); + + // NOTE: txn2 updates k1, during txn3 + try { + txn2.put(k1, v12); // should cause an exception! + } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void multiGetForUpdate_cf_conflict() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + final byte[] otherValue = "otherValue".getBytes(UTF_8); + + try(final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + try(final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGet(readOptions, cfList, keys)).isEqualTo(values); + txn.commit(); + } + + try(final Transaction txn2 = dbContainer.beginTransaction()) { + try(final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.multiGetForUpdate(readOptions, cfList, keys)) + .isEqualTo(values); + + // NOTE: txn2 updates k1, during txn3 + try { + txn2.put(testCf, keys[0], otherValue); // should cause an exception! + } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void multiGetAsListForUpdate_cf_conflict() throws RocksDBException { + final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)}; + final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)}; + final byte[] otherValue = "otherValue".getBytes(UTF_8); + + try (final DBContainer dbContainer = startDb(); + final ReadOptions readOptions = new ReadOptions()) { + final ColumnFamilyHandle testCf = dbContainer.getTestColumnFamily(); + final List cfList = Arrays.asList(testCf, testCf); + + try (final Transaction txn = dbContainer.beginTransaction()) { + txn.put(testCf, keys[0], values[0]); + txn.put(testCf, keys[1], values[1]); + assertThat(txn.multiGetAsList(readOptions, cfList, Arrays.asList(keys))) + .containsExactly(values); + txn.commit(); + } + + try (final Transaction txn2 = dbContainer.beginTransaction()) { + try (final Transaction txn3 = dbContainer.beginTransaction()) { + assertThat(txn3.multiGetForUpdateAsList(readOptions, cfList, Arrays.asList(keys))) + .containsExactly(values); + + // NOTE: txn2 updates k1, during txn3 + try { + txn2.put(testCf, keys[0], otherValue); // should cause an exception! 
+          } catch (final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting " +
+          "transactions");
+    }
+  }
+
+  @Test
+  public void multiGetForUpdate_conflict() throws RocksDBException {
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
+    final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+    try(final DBContainer dbContainer = startDb();
+        final ReadOptions readOptions = new ReadOptions()) {
+      try(final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(keys[0], values[0]);
+        txn.put(keys[1], values[1]);
+        assertThat(txn.multiGet(readOptions, keys)).isEqualTo(values);
+        txn.commit();
+      }
+
+      try(final Transaction txn2 = dbContainer.beginTransaction()) {
+        try(final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.multiGetForUpdate(readOptions, keys))
+              .isEqualTo(values);
+
+          // NOTE: txn2 updates k1, during txn3
+          try {
+            txn2.put(keys[0], otherValue); // should cause an exception!
+          } catch (final RocksDBException e) {
+            assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut);
+            return;
+          }
+        }
+      }
+
+      fail("Expected an exception for put after getForUpdate from conflicting " +
+          "transactions");
+    }
+  }
+
+  @Test
+  public void multiGetAsListForUpdate_conflict() throws RocksDBException {
+    final byte[][] keys = new byte[][] {"key1".getBytes(UTF_8), "key2".getBytes(UTF_8)};
+    final byte[][] values = new byte[][] {"value1".getBytes(UTF_8), "value2".getBytes(UTF_8)};
+    final byte[] otherValue = "otherValue".getBytes(UTF_8);
+
+    try (final DBContainer dbContainer = startDb();
+         final ReadOptions readOptions = new ReadOptions()) {
+      try (final Transaction txn = dbContainer.beginTransaction()) {
+        txn.put(keys[0], values[0]);
+        txn.put(keys[1], values[1]);
+        assertThat(txn.multiGetAsList(readOptions, Arrays.asList(keys))).containsExactly(values);
+        txn.commit();
+      }
+
+      try (final Transaction txn2 = dbContainer.beginTransaction()) {
+        try (final Transaction txn3 = dbContainer.beginTransaction()) {
+          assertThat(txn3.multiGetForUpdateAsList(readOptions, Arrays.asList(keys)))
+              .containsExactly(values);
+
+          // NOTE: txn2 updates k1, during txn3
+          try {
+            txn2.put(keys[0], otherValue); // should cause an exception!
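+            // as above, but via the default column family: the locks held by
+            // txn3's multiGetForUpdateAsList should block this write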
+ } catch(final RocksDBException e) { + assertThat(e.getStatus().getCode()).isSameAs(Status.Code.TimedOut); + return; + } + } + } + + fail("Expected an exception for put after getForUpdate from conflicting" + + "transactions"); + } + } + + @Test + public void name() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getName()).isEmpty(); + final String name = "my-transaction-" + rand.nextLong(); + txn.setName(name); + assertThat(txn.getName()).isEqualTo(name); + } + } + + @Test + public void ID() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getID()).isGreaterThan(0); + } + } + + @Test + public void deadlockDetect() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.isDeadlockDetect()).isFalse(); + } + } + + @Test + public void waitingTxns() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getWaitingTxns().getTransactionIds().length).isEqualTo(0); + } + } + + @Test + public void state() throws RocksDBException { + try(final DBContainer dbContainer = startDb()) { + + try(final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.STARTED); + txn.commit(); + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.COMMITTED); + } + + try(final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.STARTED); + txn.rollback(); + assertThat(txn.getState()) + .isSameAs(Transaction.TransactionState.STARTED); + } + } + } + + @Test + public void Id() throws RocksDBException { + try(final DBContainer dbContainer = startDb(); + final Transaction txn = dbContainer.beginTransaction()) { + assertThat(txn.getId()).isNotNull(); + } + } + + @Override + public TransactionDBContainer startDb() throws RocksDBException { + final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final TransactionDBOptions txnDbOptions = new TransactionDBOptions(); + final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions(); + final List columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor(TXN_TEST_COLUMN_FAMILY, + columnFamilyOptions)); + final List columnFamilyHandles = new ArrayList<>(); + + final TransactionDB txnDb; + try { + txnDb = TransactionDB.open(options, txnDbOptions, + dbFolder.getRoot().getAbsolutePath(), columnFamilyDescriptors, + columnFamilyHandles); + } catch(final RocksDBException e) { + columnFamilyOptions.close(); + txnDbOptions.close(); + options.close(); + throw e; + } + + final WriteOptions writeOptions = new WriteOptions(); + final TransactionOptions txnOptions = new TransactionOptions(); + + return new TransactionDBContainer(txnOptions, writeOptions, + columnFamilyHandles, txnDb, txnDbOptions, columnFamilyOptions, options); + } + + private static class TransactionDBContainer + extends DBContainer { + private final TransactionOptions txnOptions; + private final TransactionDB txnDb; + private final TransactionDBOptions txnDbOptions; + + public TransactionDBContainer( + final TransactionOptions txnOptions, 
final WriteOptions writeOptions, + final List columnFamilyHandles, + final TransactionDB txnDb, final TransactionDBOptions txnDbOptions, + final ColumnFamilyOptions columnFamilyOptions, + final DBOptions options) { + super(writeOptions, columnFamilyHandles, columnFamilyOptions, + options); + this.txnOptions = txnOptions; + this.txnDb = txnDb; + this.txnDbOptions = txnDbOptions; + } + + @Override + public Transaction beginTransaction() { + return txnDb.beginTransaction(writeOptions, txnOptions); + } + + @Override + public Transaction beginTransaction(final WriteOptions writeOptions) { + return txnDb.beginTransaction(writeOptions, txnOptions); + } + + @Override + public void close() { + txnOptions.close(); + writeOptions.close(); + for(final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { + columnFamilyHandle.close(); + } + txnDb.close(); + txnDbOptions.close(); + options.close(); + } + } + +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java new file mode 100644 index 000000000..ffa15e768 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java @@ -0,0 +1,112 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TtlDBTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void ttlDBOpen() throws RocksDBException, InterruptedException { + try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0); + final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + ttlDB.put("key".getBytes(), "value".getBytes()); + assertThat(ttlDB.get("key".getBytes())). + isEqualTo("value".getBytes()); + assertThat(ttlDB.get("key".getBytes())).isNotNull(); + } + } + + @Test + public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException { + try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0); + final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(), 1, false);) { + ttlDB.put("key".getBytes(), "value".getBytes()); + assertThat(ttlDB.get("key".getBytes())). 
+ isEqualTo("value".getBytes()); + TimeUnit.SECONDS.sleep(2); + ttlDB.compactRange(); + assertThat(ttlDB.get("key".getBytes())).isNull(); + } + } + + @Test + public void ttlDbOpenWithColumnFamilies() throws RocksDBException, + InterruptedException { + final List cfNames = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes()) + ); + final List ttlValues = Arrays.asList(0, 1); + + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions dbOptions = new DBOptions() + .setCreateMissingColumnFamilies(true) + .setCreateIfMissing(true); + final TtlDB ttlDB = TtlDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), cfNames, + columnFamilyHandleList, ttlValues, false)) { + try { + ttlDB.put("key".getBytes(), "value".getBytes()); + assertThat(ttlDB.get("key".getBytes())). + isEqualTo("value".getBytes()); + ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(), + "value".getBytes()); + assertThat(ttlDB.get(columnFamilyHandleList.get(1), + "key".getBytes())).isEqualTo("value".getBytes()); + TimeUnit.SECONDS.sleep(2); + + ttlDB.compactRange(); + ttlDB.compactRange(columnFamilyHandleList.get(1)); + + assertThat(ttlDB.get("key".getBytes())).isNotNull(); + assertThat(ttlDB.get(columnFamilyHandleList.get(1), + "key".getBytes())).isNull(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void createTtlColumnFamily() throws RocksDBException, + InterruptedException { + try (final Options options = new Options().setCreateIfMissing(true); + final TtlDB ttlDB = TtlDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final ColumnFamilyHandle columnFamilyHandle = + ttlDB.createColumnFamilyWithTtl( + new ColumnFamilyDescriptor("new_cf".getBytes()), 1)) { + ttlDB.put(columnFamilyHandle, "key".getBytes(), + "value".getBytes()); + assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())). + isEqualTo("value".getBytes()); + TimeUnit.SECONDS.sleep(2); + ttlDB.compactRange(columnFamilyHandle); + assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).isNull(); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/Types.java b/src/rocksdb/java/src/test/java/org/rocksdb/Types.java new file mode 100644 index 000000000..c3c1de833 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/Types.java @@ -0,0 +1,43 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+ +package org.rocksdb; + +/** + * Simple type conversion methods + * for use in tests + */ +public class Types { + + /** + * Convert first 4 bytes of a byte array to an int + * + * @param data The byte array + * + * @return An integer + */ + public static int byteToInt(final byte data[]) { + return (data[0] & 0xff) | + ((data[1] & 0xff) << 8) | + ((data[2] & 0xff) << 16) | + ((data[3] & 0xff) << 24); + } + + /** + * Convert an int to 4 bytes + * + * @param v The int + * + * @return A byte array containing 4 bytes + */ + public static byte[] intToByte(final int v) { + return new byte[] { + (byte)((v >>> 0) & 0xff), + (byte)((v >>> 8) & 0xff), + (byte)((v >>> 16) & 0xff), + (byte)((v >>> 24) & 0xff) + }; + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/VerifyChecksumsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/VerifyChecksumsTest.java new file mode 100644 index 000000000..ddc2a456f --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/VerifyChecksumsTest.java @@ -0,0 +1,213 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +public class VerifyChecksumsTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule public TemporaryFolder dbFolder = new TemporaryFolder(); + + /** + * Class to factor out the specific DB operations within the test + */ + abstract static class Operations { + final int kv_count; + final List elements = new ArrayList<>(); + final List sortedElements = new ArrayList<>(); + + Operations(final int kv_count) { + this.kv_count = kv_count; + for (int i = 0; i < kv_count; i++) elements.add(MessageFormat.format("{0,number,#}", i)); + sortedElements.addAll(elements); + Collections.sort(sortedElements); + } + + void fill(final RocksDB db) throws RocksDBException { + for (int i = 0; i < kv_count; i++) { + final String key = MessageFormat.format("key{0}", elements.get(i)); + final String value = MessageFormat.format("value{0}", elements.get(i)); + // noinspection ObjectAllocationInLoop + db.put(key.getBytes(), value.getBytes()); + } + db.flush(new FlushOptions()); + } + + @SuppressWarnings("ObjectAllocationInLoop") + void get(final RocksDB db, final boolean verifyFlag) throws RocksDBException { + try (final ReadOptions readOptions = new ReadOptions()) { + readOptions.setReadaheadSize(32 * 1024); + readOptions.setFillCache(false); + readOptions.setVerifyChecksums(verifyFlag); + + for (int i = 0; i < kv_count / 10; i++) { + @SuppressWarnings("UnsecureRandomNumberGeneration") + final int index = Double.valueOf(Math.random() * kv_count).intValue(); + final String key = MessageFormat.format("key{0}", sortedElements.get(index)); + final String expectedValue = MessageFormat.format("value{0}", sortedElements.get(index)); + + final byte[] value = db.get(readOptions, key.getBytes()); + assertThat(value).isEqualTo(expectedValue.getBytes()); + } + } + } + + @SuppressWarnings("ObjectAllocationInLoop") + void multiGet(final RocksDB db, final 
boolean verifyFlag) throws RocksDBException {
+      try (final ReadOptions readOptions = new ReadOptions()) {
+        readOptions.setReadaheadSize(32 * 1024);
+        readOptions.setFillCache(false);
+        readOptions.setVerifyChecksums(verifyFlag);
+
+        final List<byte[]> keys = new ArrayList<>();
+        final List<String> expectedValues = new ArrayList<>();
+
+        for (int i = 0; i < kv_count / 10; i++) {
+          @SuppressWarnings("UnsecureRandomNumberGeneration")
+          final int index = Double.valueOf(Math.random() * kv_count).intValue();
+          keys.add(MessageFormat.format("key{0}", sortedElements.get(index)).getBytes());
+
+          expectedValues.add(MessageFormat.format("value{0}", sortedElements.get(index)));
+        }
+
+        final List<byte[]> values = db.multiGetAsList(readOptions, keys);
+        for (int i = 0; i < keys.size(); i++) {
+          assertThat(values.get(i)).isEqualTo(expectedValues.get(i).getBytes());
+        }
+      }
+    }
+
+    void iterate(final RocksDB db, final boolean verifyFlag) throws RocksDBException {
+      int i = 0;
+      try (final ReadOptions readOptions = new ReadOptions()) {
+        readOptions.setReadaheadSize(32 * 1024);
+        readOptions.setFillCache(false);
+        readOptions.setVerifyChecksums(verifyFlag);
+        try (final RocksIterator rocksIterator = db.newIterator(readOptions)) {
+          rocksIterator.seekToFirst();
+          rocksIterator.status();
+          while (rocksIterator.isValid()) {
+            final byte[] key = rocksIterator.key();
+            final byte[] value = rocksIterator.value();
+            // noinspection ObjectAllocationInLoop
+            assertThat(key).isEqualTo(
+                (MessageFormat.format("key{0}", sortedElements.get(i))).getBytes());
+            // noinspection ObjectAllocationInLoop
+            assertThat(value).isEqualTo(
+                (MessageFormat.format("value{0}", sortedElements.get(i))).getBytes());
+            rocksIterator.next();
+            rocksIterator.status();
+            i++;
+          }
+        }
+      }
+      assertThat(i).isEqualTo(kv_count);
+    }
+
+    abstract void performOperations(final RocksDB db, final boolean verifyFlag)
+        throws RocksDBException;
+  }
+
+  private static final int KV_COUNT = 10000;
+
+  /**
+   * Run some operations and count the TickerType.BLOCK_CHECKSUM_COMPUTE_COUNT before and after.
+   * It should GO UP when the read options have checksum verification turned on.
+   * It should REMAIN UNCHANGED when the read options have checksum verification turned off.
+   * As the read options refer only to the read operations, there are still a few checksums
+   * performed outside this (blocks are getting loaded for lots of reasons, not always directly due
+   * to reads), but this test provides a good enough proxy for whether the flag is being noticed.
+   *
+   * @param operations the DB reading operations to perform, which affect the checksum stats
+   *
+   * @throws RocksDBException if an error occurs in the underlying native library
+   */
+  private void verifyChecksums(final Operations operations) throws RocksDBException {
+    final String dbPath = dbFolder.getRoot().getAbsolutePath();
+
+    // noinspection SingleStatementInBlock
+    try (final Statistics statistics = new Statistics();
+         final Options options = new Options().setCreateIfMissing(true).setStatistics(statistics)) {
+      try (final RocksDB db = RocksDB.open(options, dbPath)) {
+        // 0
+        System.out.println(MessageFormat.format(
+            "newly open {0}", statistics.getTickerCount(TickerType.BLOCK_CHECKSUM_COMPUTE_COUNT)));
+        operations.fill(db);
+        // 1
+        System.out.println(MessageFormat.format(
+            "flushed {0}", statistics.getTickerCount(TickerType.BLOCK_CHECKSUM_COMPUTE_COUNT)));
+      }
+
+      // 2
+      System.out.println(MessageFormat.format("closed-after-write {0}",
+          statistics.getTickerCount(TickerType.BLOCK_CHECKSUM_COMPUTE_COUNT)));
+
+      for (final boolean verifyFlag : new boolean[] {false, true, false, true}) {
+        try (final RocksDB db = RocksDB.open(options, dbPath)) {
+          final long beforeOperationsCount =
+              statistics.getTickerCount(TickerType.BLOCK_CHECKSUM_COMPUTE_COUNT);
+          System.out.println(MessageFormat.format("re-opened {0}", beforeOperationsCount));
+          operations.performOperations(db, verifyFlag);
+          final long afterOperationsCount =
+              statistics.getTickerCount(TickerType.BLOCK_CHECKSUM_COMPUTE_COUNT);
+          if (verifyFlag) {
+            // We don't need to be exact - we are checking that checksums happen;
+            // exactly how many depends on block size etc., so the count may not be entirely stable
+            System.out.println(MessageFormat.format("verify=true {0}", afterOperationsCount));
+            assertThat(afterOperationsCount).isGreaterThan(beforeOperationsCount + 20);
+          } else {
+            System.out.println(MessageFormat.format("verify=false {0}", afterOperationsCount));
+            assertThat(afterOperationsCount).isEqualTo(beforeOperationsCount);
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void verifyChecksumsInIteration() throws RocksDBException {
+    // noinspection AnonymousInnerClassMayBeStatic
+    verifyChecksums(new Operations(KV_COUNT) {
+      @Override
+      void performOperations(final RocksDB db, final boolean verifyFlag) throws RocksDBException {
+        iterate(db, verifyFlag);
+      }
+    });
+  }
+
+  @Test
+  public void verifyChecksumsGet() throws RocksDBException {
+    // noinspection AnonymousInnerClassMayBeStatic
+    verifyChecksums(new Operations(KV_COUNT) {
+      @Override
+      void performOperations(final RocksDB db, final boolean verifyFlag) throws RocksDBException {
+        get(db, verifyFlag);
+      }
+    });
+  }
+
+  @Test
+  public void verifyChecksumsMultiGet() throws RocksDBException {
+    // noinspection AnonymousInnerClassMayBeStatic
+    verifyChecksums(new Operations(KV_COUNT) {
+      @Override
+      void performOperations(final RocksDB db, final boolean verifyFlag) throws RocksDBException {
+        multiGet(db, verifyFlag);
+      }
+    });
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
new file mode 100644
index 000000000..2a0133f6b
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WALRecoveryModeTest.java
@@ -0,0 +1,22 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + + +public class WALRecoveryModeTest { + + @Test + public void getWALRecoveryMode() { + for (final WALRecoveryMode walRecoveryMode : WALRecoveryMode.values()) { + assertThat(WALRecoveryMode.getWALRecoveryMode(walRecoveryMode.getValue())) + .isEqualTo(walRecoveryMode); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java new file mode 100644 index 000000000..adeb959d1 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/WalFilterTest.java @@ -0,0 +1,165 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.rocksdb.util.ByteUtil.bytes; +import static org.rocksdb.util.TestUtil.*; + +public class WalFilterTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void walFilter() throws RocksDBException { + // Create 3 batches with two keys each + final byte[][][] batchKeys = { + new byte[][] { + bytes("key1"), + bytes("key2") + }, + new byte[][] { + bytes("key3"), + bytes("key4") + }, + new byte[][] { + bytes("key5"), + bytes("key6") + } + + }; + + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor(bytes("pikachu")) + ); + final List cfHandles = new ArrayList<>(); + + // Test with all WAL processing options + for (final WalProcessingOption option : WalProcessingOption.values()) { + try (final Options options = optionsForLogIterTest(); + final DBOptions dbOptions = new DBOptions(options) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, cfHandles)) { + try (final WriteOptions writeOptions = new WriteOptions()) { + // Write given keys in given batches + for (int i = 0; i < batchKeys.length; i++) { + final WriteBatch batch = new WriteBatch(); + for (int j = 0; j < batchKeys[i].length; j++) { + batch.put(cfHandles.get(0), batchKeys[i][j], dummyString(1024)); + } + db.write(writeOptions, batch); + } + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + cfHandles.clear(); + } + } + + // Create a test filter that would apply wal_processing_option at the first + // record + final int applyOptionForRecordIndex = 1; + try (final TestableWalFilter walFilter = + new TestableWalFilter(option, applyOptionForRecordIndex)) { + + try (final Options options = optionsForLogIterTest(); + final DBOptions dbOptions = new DBOptions(options) + .setWalFilter(walFilter)) { + + try (final RocksDB db = RocksDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, cfHandles)) { + + try { + assertThat(walFilter.logNumbers).isNotEmpty(); + 
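// logRecordFound() populated both lists while the DB open above
+              // replayed the WAL through walFilter
+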
assertThat(walFilter.logFileNames).isNotEmpty();
+            } finally {
+              for (final ColumnFamilyHandle cfHandle : cfHandles) {
+                cfHandle.close();
+              }
+              cfHandles.clear();
+            }
+          } catch (final RocksDBException e) {
+            if (option != WalProcessingOption.CORRUPTED_RECORD) {
+              // an exception is expected only when CORRUPTED_RECORD is applied!
+              throw e;
+            }
+          }
+        }
+      }
+    }
+  }
+
+
+  private static class TestableWalFilter extends AbstractWalFilter {
+    private final WalProcessingOption walProcessingOption;
+    private final int applyOptionForRecordIndex;
+    Map<Integer, Long> cfLognumber;
+    Map<String, Integer> cfNameId;
+    final List<Long> logNumbers = new ArrayList<>();
+    final List<String> logFileNames = new ArrayList<>();
+    private int currentRecordIndex = 0;
+
+    public TestableWalFilter(final WalProcessingOption walProcessingOption,
+        final int applyOptionForRecordIndex) {
+      super();
+      this.walProcessingOption = walProcessingOption;
+      this.applyOptionForRecordIndex = applyOptionForRecordIndex;
+    }
+
+    @Override
+    public void columnFamilyLogNumberMap(final Map<Integer, Long> cfLognumber,
+        final Map<String, Integer> cfNameId) {
+      this.cfLognumber = cfLognumber;
+      this.cfNameId = cfNameId;
+    }
+
+    @Override
+    public LogRecordFoundResult logRecordFound(
+        final long logNumber, final String logFileName, final WriteBatch batch,
+        final WriteBatch newBatch) {
+
+      logNumbers.add(logNumber);
+      logFileNames.add(logFileName);
+
+      final WalProcessingOption optionToReturn;
+      if (currentRecordIndex == applyOptionForRecordIndex) {
+        optionToReturn = walProcessingOption;
+      }
+      else {
+        optionToReturn = WalProcessingOption.CONTINUE_PROCESSING;
+      }
+
+      currentRecordIndex++;
+
+      return new LogRecordFoundResult(optionToReturn, false);
+    }
+
+    @Override
+    public String name() {
+      return "testable-wal-filter";
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
new file mode 100644
index 000000000..2826b128f
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.rocksdb.util.CapturingWriteBatchHandler;
+import org.rocksdb.util.CapturingWriteBatchHandler.Event;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.rocksdb.util.CapturingWriteBatchHandler.Action.*;
+
+
+public class WriteBatchHandlerTest {
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Test
+  public void writeBatchHandler() throws RocksDBException {
+    // setup test data
+    final List<Event> testEvents = Arrays.asList(
+        new Event(DELETE, "k0".getBytes(), null),
+        new Event(PUT, "k1".getBytes(), "v1".getBytes()),
+        new Event(PUT, "k2".getBytes(), "v2".getBytes()),
+        new Event(PUT, "k3".getBytes(), "v3".getBytes()),
+        new Event(LOG, null, "log1".getBytes()),
+        new Event(MERGE, "k2".getBytes(), "v22".getBytes()),
+        new Event(DELETE, "k3".getBytes(), null)
+    );
+
+    // load test data to the write batch
+    try (final WriteBatch batch = new WriteBatch()) {
+      for (final Event testEvent : testEvents) {
+        switch (testEvent.action) {
+
+          case PUT:
+            batch.put(testEvent.key, testEvent.value);
+            break;
+
+          case MERGE:
+            batch.merge(testEvent.key, testEvent.value);
+            break;
+
+          case DELETE:
+            batch.delete(testEvent.key);
+            break;
+
+          case LOG:
+            batch.putLogData(testEvent.value);
+            break;
+        }
+      }
+
+      // attempt to read test data back from the WriteBatch by iterating
+      // with a handler
+      try (final CapturingWriteBatchHandler handler =
+               new CapturingWriteBatchHandler()) {
+        batch.iterate(handler);
+
+        // compare the results to the test data
+        final List<Event> actualEvents =
+            handler.getEvents();
+        assertThat(testEvents.size()).isEqualTo(actualEvents.size());
+
+        assertThat(testEvents).isEqualTo(actualEvents);
+      }
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java
new file mode 100644
index 000000000..cc3ad26eb
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java
@@ -0,0 +1,528 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+package org.rocksdb; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; +import static org.rocksdb.util.CapturingWriteBatchHandler.Action.DELETE; +import static org.rocksdb.util.CapturingWriteBatchHandler.Action.DELETE_RANGE; +import static org.rocksdb.util.CapturingWriteBatchHandler.Action.LOG; +import static org.rocksdb.util.CapturingWriteBatchHandler.Action.MERGE; +import static org.rocksdb.util.CapturingWriteBatchHandler.Action.PUT; +import static org.rocksdb.util.CapturingWriteBatchHandler.Action.SINGLE_DELETE; + +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.CapturingWriteBatchHandler; +import org.rocksdb.util.CapturingWriteBatchHandler.Event; +import org.rocksdb.util.WriteBatchGetter; + +/** + * This class mimics the db/write_batch_test.cc + * in the c++ rocksdb library. + */ +public class WriteBatchTest { + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void emptyWriteBatch() { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.count()).isEqualTo(0); + } + } + + @Test + public void multipleBatchOperations() + throws RocksDBException { + + final byte[] foo = "foo".getBytes(UTF_8); + final byte[] bar = "bar".getBytes(UTF_8); + final byte[] box = "box".getBytes(UTF_8); + final byte[] baz = "baz".getBytes(UTF_8); + final byte[] boo = "boo".getBytes(UTF_8); + final byte[] hoo = "hoo".getBytes(UTF_8); + final byte[] hello = "hello".getBytes(UTF_8); + + try (final WriteBatch batch = new WriteBatch()) { + batch.put(foo, bar); + batch.delete(box); + batch.put(baz, boo); + batch.merge(baz, hoo); + batch.singleDelete(foo); + batch.deleteRange(baz, foo); + batch.putLogData(hello); + + try(final CapturingWriteBatchHandler handler = + new CapturingWriteBatchHandler()) { + batch.iterate(handler); + + assertThat(handler.getEvents().size()).isEqualTo(7); + + assertThat(handler.getEvents().get(0)).isEqualTo(new Event(PUT, foo, bar)); + assertThat(handler.getEvents().get(1)).isEqualTo(new Event(DELETE, box, null)); + assertThat(handler.getEvents().get(2)).isEqualTo(new Event(PUT, baz, boo)); + assertThat(handler.getEvents().get(3)).isEqualTo(new Event(MERGE, baz, hoo)); + assertThat(handler.getEvents().get(4)).isEqualTo(new Event(SINGLE_DELETE, foo, null)); + assertThat(handler.getEvents().get(5)).isEqualTo(new Event(DELETE_RANGE, baz, foo)); + assertThat(handler.getEvents().get(6)).isEqualTo(new Event(LOG, null, hello)); + } + } + } + + @Test + public void multipleBatchOperationsDirect() + throws UnsupportedEncodingException, RocksDBException { + try (WriteBatch batch = new WriteBatch()) { + ByteBuffer key = ByteBuffer.allocateDirect(16); + ByteBuffer value = ByteBuffer.allocateDirect(16); + key.put("foo".getBytes("US-ASCII")).flip(); + value.put("bar".getBytes("US-ASCII")).flip(); + batch.put(key, value); + assertThat(key.position()).isEqualTo(3); + assertThat(key.limit()).isEqualTo(3); + assertThat(value.position()).isEqualTo(3); + assertThat(value.limit()).isEqualTo(3); + + key.clear(); + key.put("box".getBytes("US-ASCII")).flip(); + batch.delete(key); + assertThat(key.position()).isEqualTo(3); + assertThat(key.limit()).isEqualTo(3); + + batch.put("baz".getBytes("US-ASCII"), 
"boo".getBytes("US-ASCII")); + + WriteBatchTestInternalHelper.setSequence(batch, 100); + assertThat(WriteBatchTestInternalHelper.sequence(batch)).isNotNull().isEqualTo(100); + assertThat(batch.count()).isEqualTo(3); + assertThat(new String(getContents(batch), "US-ASCII")) + .isEqualTo("Put(baz, boo)@102" + + "Delete(box)@101" + + "Put(foo, bar)@100"); + } + } + + @Test + public void testAppendOperation() + throws RocksDBException { + try (final WriteBatch b1 = new WriteBatch(); + final WriteBatch b2 = new WriteBatch()) { + WriteBatchTestInternalHelper.setSequence(b1, 200); + WriteBatchTestInternalHelper.setSequence(b2, 300); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(getContents(b1).length).isEqualTo(0); + assertThat(b1.count()).isEqualTo(0); + b2.put("a".getBytes(UTF_8), "va".getBytes(UTF_8)); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat("Put(a, va)@200".equals(new String(getContents(b1), + UTF_8))); + assertThat(b1.count()).isEqualTo(1); + b2.clear(); + b2.put("b".getBytes(UTF_8), "vb".getBytes(UTF_8)); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(("Put(a, va)@200" + + "Put(b, vb)@201") + .equals(new String(getContents(b1), UTF_8))); + assertThat(b1.count()).isEqualTo(2); + b2.delete("foo".getBytes(UTF_8)); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(("Put(a, va)@200" + + "Put(b, vb)@202" + + "Put(b, vb)@201" + + "Delete(foo)@203") + .equals(new String(getContents(b1), UTF_8))); + assertThat(b1.count()).isEqualTo(4); + } + } + + @Test + public void blobOperation() + throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8)); + batch.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8)); + batch.put("k3".getBytes(UTF_8), "v3".getBytes(UTF_8)); + batch.putLogData("blob1".getBytes(UTF_8)); + batch.delete("k2".getBytes(UTF_8)); + batch.putLogData("blob2".getBytes(UTF_8)); + batch.merge("foo".getBytes(UTF_8), "bar".getBytes(UTF_8)); + assertThat(batch.count()).isEqualTo(5); + assertThat(("Merge(foo, bar)@4" + + "Put(k1, v1)@0" + + "Delete(k2)@3" + + "Put(k2, v2)@1" + + "Put(k3, v3)@2") + .equals(new String(getContents(batch), UTF_8))); + } + } + + @Test + public void savePoints() + throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8)); + batch.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8)); + batch.put("k3".getBytes(UTF_8), "v3".getBytes(UTF_8)); + + assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1"); + assertThat(getFromWriteBatch(batch, "k2")).isEqualTo("v2"); + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3"); + + batch.setSavePoint(); + + batch.delete("k2".getBytes(UTF_8)); + batch.put("k3".getBytes(UTF_8), "v3-2".getBytes(UTF_8)); + + assertThat(getFromWriteBatch(batch, "k2")).isNull(); + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2"); + + + batch.setSavePoint(); + + batch.put("k3".getBytes(UTF_8), "v3-3".getBytes(UTF_8)); + batch.put("k4".getBytes(UTF_8), "v4".getBytes(UTF_8)); + + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-3"); + assertThat(getFromWriteBatch(batch, "k4")).isEqualTo("v4"); + + + batch.rollbackToSavePoint(); + + assertThat(getFromWriteBatch(batch, "k2")).isNull(); + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2"); + assertThat(getFromWriteBatch(batch, "k4")).isNull(); + + + batch.rollbackToSavePoint(); + + assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1"); + assertThat(getFromWriteBatch(batch, 
"k2")).isEqualTo("v2"); + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3"); + assertThat(getFromWriteBatch(batch, "k4")).isNull(); + } + } + + @Test + public void deleteRange() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteBatch batch = new WriteBatch(); + final WriteOptions wOpt = new WriteOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + db.put("key3".getBytes(), "abcdefg".getBytes()); + db.put("key4".getBytes(), "xyz".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes()); + assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes()); + assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes()); + + batch.deleteRange("key2".getBytes(), "key4".getBytes()); + db.write(wOpt, batch); + + assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes()); + assertThat(db.get("key2".getBytes())).isNull(); + assertThat(db.get("key3".getBytes())).isNull(); + assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes()); + } + } + + @Test + public void restorePoints() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + + batch.put("k1".getBytes(), "v1".getBytes()); + batch.put("k2".getBytes(), "v2".getBytes()); + + batch.setSavePoint(); + + batch.put("k1".getBytes(), "123456789".getBytes()); + batch.delete("k2".getBytes()); + + batch.rollbackToSavePoint(); + + try(final CapturingWriteBatchHandler handler = new CapturingWriteBatchHandler()) { + batch.iterate(handler); + + assertThat(handler.getEvents().size()).isEqualTo(2); + assertThat(handler.getEvents().get(0)).isEqualTo(new Event(PUT, "k1".getBytes(), "v1".getBytes())); + assertThat(handler.getEvents().get(1)).isEqualTo(new Event(PUT, "k2".getBytes(), "v2".getBytes())); + } + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.rollbackToSavePoint(); + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints_nested() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + + batch.setSavePoint(); + batch.rollbackToSavePoint(); + + // without previous corresponding setSavePoint + batch.rollbackToSavePoint(); + } + } + + @Test + public void popSavePoint() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + + batch.put("k1".getBytes(), "v1".getBytes()); + batch.put("k2".getBytes(), "v2".getBytes()); + + batch.setSavePoint(); + + batch.put("k1".getBytes(), "123456789".getBytes()); + batch.delete("k2".getBytes()); + + batch.setSavePoint(); + + batch.popSavePoint(); + + batch.rollbackToSavePoint(); + + try(final CapturingWriteBatchHandler handler = new CapturingWriteBatchHandler()) { + batch.iterate(handler); + + assertThat(handler.getEvents().size()).isEqualTo(2); + assertThat(handler.getEvents().get(0)).isEqualTo(new Event(PUT, "k1".getBytes(), "v1".getBytes())); + assertThat(handler.getEvents().get(1)).isEqualTo(new Event(PUT, "k2".getBytes(), "v2".getBytes())); + } + } + } + + @Test(expected = RocksDBException.class) + public void popSavePoint_withoutSavePoints() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.popSavePoint(); + } + } + + @Test(expected = RocksDBException.class) + public void 
popSavePoint_withoutSavePoints_nested() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + + batch.setSavePoint(); + batch.popSavePoint(); + + // without previous corresponding setSavePoint + batch.popSavePoint(); + } + } + + @Test + public void maxBytes() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.setMaxBytes(19); + + batch.put("k1".getBytes(), "v1".getBytes()); + } + } + + @Test(expected = RocksDBException.class) + public void maxBytes_over() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.setMaxBytes(1); + + batch.put("k1".getBytes(), "v1".getBytes()); + } + } + + @Test + public void data() throws RocksDBException { + try (final WriteBatch batch1 = new WriteBatch()) { + batch1.delete("k0".getBytes()); + batch1.put("k1".getBytes(), "v1".getBytes()); + batch1.put("k2".getBytes(), "v2".getBytes()); + batch1.put("k3".getBytes(), "v3".getBytes()); + batch1.putLogData("log1".getBytes()); + batch1.merge("k2".getBytes(), "v22".getBytes()); + batch1.delete("k3".getBytes()); + + final byte[] serialized = batch1.data(); + + try(final WriteBatch batch2 = new WriteBatch(serialized)) { + assertThat(batch2.count()).isEqualTo(batch1.count()); + + try(final CapturingWriteBatchHandler handler1 = new CapturingWriteBatchHandler()) { + batch1.iterate(handler1); + + try (final CapturingWriteBatchHandler handler2 = new CapturingWriteBatchHandler()) { + batch2.iterate(handler2); + + assertThat(handler1.getEvents().equals(handler2.getEvents())).isTrue(); + } + } + } + } + } + + @Test + public void dataSize() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.put("k1".getBytes(), "v1".getBytes()); + + assertThat(batch.getDataSize()).isEqualTo(19); + } + } + + @Test + public void hasPut() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasPut()).isFalse(); + + batch.put("k1".getBytes(), "v1".getBytes()); + + assertThat(batch.hasPut()).isTrue(); + } + } + + @Test + public void hasDelete() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasDelete()).isFalse(); + + batch.delete("k1".getBytes()); + + assertThat(batch.hasDelete()).isTrue(); + } + } + + @Test + public void hasSingleDelete() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasSingleDelete()).isFalse(); + + batch.singleDelete("k1".getBytes()); + + assertThat(batch.hasSingleDelete()).isTrue(); + } + } + + @Test + public void hasDeleteRange() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasDeleteRange()).isFalse(); + + batch.deleteRange("k1".getBytes(), "k2".getBytes()); + + assertThat(batch.hasDeleteRange()).isTrue(); + } + } + + @Test + public void hasBeginPrepareRange() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasBeginPrepare()).isFalse(); + } + } + + @Test + public void hasEndPrepareRange() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasEndPrepare()).isFalse(); + } + } + + @Test + public void hasCommit() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasCommit()).isFalse(); + } + } + + @Test + public void hasRollback() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.hasRollback()).isFalse(); + } + } + + @Test + 
public void walTerminationPoint() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + WriteBatch.SavePoint walTerminationPoint = batch.getWalTerminationPoint(); + assertThat(walTerminationPoint.isCleared()).isTrue(); + + batch.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8)); + + batch.markWalTerminationPoint(); + + walTerminationPoint = batch.getWalTerminationPoint(); + assertThat(walTerminationPoint.getSize()).isEqualTo(19); + assertThat(walTerminationPoint.getCount()).isEqualTo(1); + assertThat(walTerminationPoint.getContentFlags()).isEqualTo(2); + } + } + + @Test + public void getWriteBatch() { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.getWriteBatch()).isEqualTo(batch); + } + } + + static byte[] getContents(final WriteBatch wb) { + return getContents(wb.nativeHandle_); + } + + static String getFromWriteBatch(final WriteBatch wb, final String key) + throws RocksDBException { + final WriteBatchGetter getter = + new WriteBatchGetter(key.getBytes(UTF_8)); + wb.iterate(getter); + if(getter.getValue() != null) { + return new String(getter.getValue(), UTF_8); + } else { + return null; + } + } + + private static native byte[] getContents(final long writeBatchHandle); +} + +/** + * Package-private class which provides java api to access + * c++ WriteBatchInternal. + */ +class WriteBatchTestInternalHelper { + static void setSequence(final WriteBatch wb, final long sn) { + setSequence(wb.nativeHandle_, sn); + } + + static long sequence(final WriteBatch wb) { + return sequence(wb.nativeHandle_); + } + + static void append(final WriteBatch wb1, final WriteBatch wb2) { + append(wb1.nativeHandle_, wb2.nativeHandle_); + } + + private static native void setSequence(final long writeBatchHandle, + final long sn); + + private static native long sequence(final long writeBatchHandle); + + private static native void append(final long writeBatchHandle1, + final long writeBatchHandle2); +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java new file mode 100644 index 000000000..c5090dbce --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java @@ -0,0 +1,104 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+package org.rocksdb;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.concurrent.*;
+
+@RunWith(Parameterized.class)
+public class WriteBatchThreadedTest {
+
+  @Parameters(name = "WriteBatchThreadedTest(threadCount={0})")
+  public static Iterable<Integer> data() {
+    return Arrays.asList(new Integer[]{1, 10, 50, 100});
+  }
+
+  @Parameter
+  public int threadCount;
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  RocksDB db;
+
+  @Before
+  public void setUp() throws Exception {
+    RocksDB.loadLibrary();
+    final Options options = new Options()
+        .setCreateIfMissing(true)
+        .setIncreaseParallelism(32);
+    db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath());
+    assert (db != null);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (db != null) {
+      db.close();
+    }
+  }
+
+  @Test
+  public void threadedWrites() throws InterruptedException, ExecutionException {
+    final List<Callable<Void>> callables = new ArrayList<>();
+    for (int i = 0; i < 100; i++) {
+      final int offset = i * 100;
+      callables.add(new Callable<Void>() {
+        @Override
+        public Void call() throws RocksDBException {
+          try (final WriteBatch wb = new WriteBatch();
+               final WriteOptions w_opt = new WriteOptions()) {
+            for (int i = offset; i < offset + 100; i++) {
+              wb.put(ByteBuffer.allocate(4).putInt(i).array(), "parallel rocks test".getBytes());
+            }
+            db.write(w_opt, wb);
+          }
+          return null;
+        }
+      });
+    }
+
+    //submit the callables
+    final ExecutorService executorService =
+        Executors.newFixedThreadPool(threadCount);
+    try {
+      final ExecutorCompletionService<Void> completionService =
+          new ExecutorCompletionService<>(executorService);
+      final Set<Future<Void>> futures = new HashSet<>();
+      for (final Callable<Void> callable : callables) {
+        futures.add(completionService.submit(callable));
+      }
+
+      while (futures.size() > 0) {
+        final Future<Void> future = completionService.take();
+        futures.remove(future);
+
+        try {
+          future.get();
+        } catch (final ExecutionException e) {
+          for (final Future<Void> f : futures) {
+            f.cancel(true);
+          }
+
+          throw e;
+        }
+      }
+    } finally {
+      executorService.shutdown();
+      executorService.awaitTermination(10, TimeUnit.SECONDS);
+    }
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
new file mode 100644
index 000000000..b0a0cdc0e
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java
@@ -0,0 +1,1068 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+ +package org.rocksdb; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.ByteBufferAllocator; + +public class WriteBatchWithIndexTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void readYourOwnWrites() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] k1 = "key1".getBytes(); + final byte[] v1 = "value1".getBytes(); + final byte[] k2 = "key2".getBytes(); + final byte[] v2 = "value2".getBytes(); + + db.put(k1, v1); + db.put(k2, v2); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final RocksIterator base = db.newIterator(); + final RocksIterator it = wbwi.newIteratorWithBase(base)) { + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k1); + assertThat(it.value()).isEqualTo(v1); + + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k2); + assertThat(it.value()).isEqualTo(v2); + + //put data to the write batch and make sure we can read it. + final byte[] k3 = "key3".getBytes(); + final byte[] v3 = "value3".getBytes(); + wbwi.put(k3, v3); + it.seek(k3); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k3); + assertThat(it.value()).isEqualTo(v3); + + //update k2 in the write batch and check the value + final byte[] v2Other = "otherValue2".getBytes(); + wbwi.put(k2, v2Other); + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k2); + assertThat(it.value()).isEqualTo(v2Other); + + //delete k1 and make sure we can read back the write + wbwi.delete(k1); + it.seek(k1); + assertThat(it.key()).isNotEqualTo(k1); + + //reinsert k1 and make sure we see the new value + final byte[] v1Other = "otherValue1".getBytes(); + wbwi.put(k1, v1Other); + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k1); + assertThat(it.value()).isEqualTo(v1Other); + + //single remove k3 and make sure we can read back the write + wbwi.singleDelete(k3); + it.seek(k3); + assertThat(it.isValid()).isEqualTo(false); + + //reinsert k3 and make sure we see the new value + final byte[] v3Other = "otherValue3".getBytes(); + wbwi.put(k3, v3Other); + it.seek(k3); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k3); + assertThat(it.value()).isEqualTo(v3Other); + } + } + } + + @Test + public void readYourOwnWritesCf() throws RocksDBException { + final List cfNames = + Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + + final List columnFamilyHandleList = new ArrayList<>(); + + // Test open database with column family names + try (final DBOptions options = + new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open( + options, dbFolder.getRoot().getAbsolutePath(), cfNames, columnFamilyHandleList)) { + final ColumnFamilyHandle newCf = columnFamilyHandleList.get(1); + + try { + 
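// write the keys through the DB first; the WriteBatchWithIndex
+        // iterator opened below overlays uncommitted batch writes on top of
+        // this committed data
+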
final byte[] k1 = "key1".getBytes(); + final byte[] v1 = "value1".getBytes(); + final byte[] k2 = "key2".getBytes(); + final byte[] v2 = "value2".getBytes(); + + db.put(newCf, k1, v1); + db.put(newCf, k2, v2); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final ReadOptions readOptions = new ReadOptions(); + final RocksIterator base = db.newIterator(newCf, readOptions); + final RocksIterator it = wbwi.newIteratorWithBase(newCf, base, readOptions)) { + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k1); + assertThat(it.value()).isEqualTo(v1); + + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k2); + assertThat(it.value()).isEqualTo(v2); + + // put data to the write batch and make sure we can read it. + final byte[] k3 = "key3".getBytes(); + final byte[] v3 = "value3".getBytes(); + wbwi.put(newCf, k3, v3); + it.seek(k3); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k3); + assertThat(it.value()).isEqualTo(v3); + + // update k2 in the write batch and check the value + final byte[] v2Other = "otherValue2".getBytes(); + wbwi.put(newCf, k2, v2Other); + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k2); + assertThat(it.value()).isEqualTo(v2Other); + + // delete k1 and make sure we can read back the write + wbwi.delete(newCf, k1); + it.seek(k1); + assertThat(it.key()).isNotEqualTo(k1); + + // reinsert k1 and make sure we see the new value + final byte[] v1Other = "otherValue1".getBytes(); + wbwi.put(newCf, k1, v1Other); + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k1); + assertThat(it.value()).isEqualTo(v1Other); + + // single remove k3 and make sure we can read back the write + wbwi.singleDelete(newCf, k3); + it.seek(k3); + assertThat(it.isValid()).isEqualTo(false); + + // reinsert k3 and make sure we see the new value + final byte[] v3Other = "otherValue3".getBytes(); + wbwi.put(newCf, k3, v3Other); + it.seek(k3); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k3); + assertThat(it.value()).isEqualTo(v3Other); + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void readYourOwnWritesCfIterDirectBB() throws RocksDBException { + readYourOwnWritesCfIterDirect(ByteBufferAllocator.DIRECT); + } + + @Test + public void readYourOwnWritesCfIterIndirectBB() throws RocksDBException { + readYourOwnWritesCfIterDirect(ByteBufferAllocator.HEAP); + } + + public void readYourOwnWritesCfIterDirect(final ByteBufferAllocator byteBufferAllocator) + throws RocksDBException { + final List cfNames = + Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + + final List columnFamilyHandleList = new ArrayList<>(); + + // Test open database with column family names + try (final DBOptions options = + new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open( + options, dbFolder.getRoot().getAbsolutePath(), cfNames, columnFamilyHandleList)) { + final ColumnFamilyHandle newCf = columnFamilyHandleList.get(1); + + try { + final byte[] kv1 = "key1".getBytes(); + final byte[] vv1 = "value1".getBytes(); + final ByteBuffer k1 = byteBufferAllocator.allocate(12); + k1.put(kv1); + final byte[] kv2 = "key2".getBytes(); + final byte[] vv2 = "value2".getBytes(); + final ByteBuffer k2 = 
+  public void readYourOwnWritesCfIterDirect(final ByteBufferAllocator byteBufferAllocator)
+      throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfNames =
+        Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor("new_cf".getBytes()));
+
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+
+    // Test open database with column family names
+    try (final DBOptions options =
+             new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(
+             options, dbFolder.getRoot().getAbsolutePath(), cfNames, columnFamilyHandleList)) {
+      final ColumnFamilyHandle newCf = columnFamilyHandleList.get(1);
+
+      try {
+        final byte[] kv1 = "key1".getBytes();
+        final byte[] vv1 = "value1".getBytes();
+        final ByteBuffer k1 = byteBufferAllocator.allocate(12);
+        k1.put(kv1);
+        final byte[] kv2 = "key2".getBytes();
+        final byte[] vv2 = "value2".getBytes();
+        final ByteBuffer k2 = byteBufferAllocator.allocate(12);
+        k2.put(kv2);
+
+        db.put(newCf, kv1, vv1);
+        db.put(newCf, kv2, vv2);
+
+        try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+             final ReadOptions readOptions = new ReadOptions();
+             final RocksIterator base = db.newIterator(newCf, readOptions);
+             final RocksIterator it = wbwi.newIteratorWithBase(newCf, base, readOptions)) {
+          k1.flip();
+          it.seek(k1);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv1);
+          assertThat(it.value()).isEqualTo(vv1);
+
+          k2.flip();
+          it.seek(k2);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv2);
+          assertThat(it.value()).isEqualTo(vv2);
+
+          final byte[] kv1point5 = "key1point5".getBytes();
+          final ByteBuffer k1point5 = byteBufferAllocator.allocate(12);
+          k1point5.put(kv1point5);
+
+          k1point5.flip();
+          it.seek(k1point5);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv2);
+          assertThat(it.value()).isEqualTo(vv2);
+
+          k1point5.flip();
+          it.seekForPrev(k1point5);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv1);
+          assertThat(it.value()).isEqualTo(vv1);
+
+          // put data to the write batch and make sure we can read it.
+          final byte[] kv3 = "key3".getBytes();
+          final ByteBuffer k3 = byteBufferAllocator.allocate(12);
+          k3.put(kv3);
+          final byte[] vv3 = "value3".getBytes();
+          wbwi.put(newCf, kv3, vv3);
+          k3.flip();
+          it.seek(k3);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv3);
+          assertThat(it.value()).isEqualTo(vv3);
+
+          // update k2 in the write batch and check the value
+          final byte[] v2Other = "otherValue2".getBytes();
+          wbwi.put(newCf, kv2, v2Other);
+          k2.flip();
+          it.seek(k2);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv2);
+          assertThat(it.value()).isEqualTo(v2Other);
+
+          // delete k1 and make sure we can read back the write
+          wbwi.delete(newCf, kv1);
+          k1.flip();
+          it.seek(k1);
+          assertThat(it.key()).isNotEqualTo(kv1);
+
+          // reinsert k1 and make sure we see the new value
+          final byte[] v1Other = "otherValue1".getBytes();
+          wbwi.put(newCf, kv1, v1Other);
+          k1.flip();
+          it.seek(k1);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv1);
+          assertThat(it.value()).isEqualTo(v1Other);
+
+          // single remove k3 and make sure we can read back the write
+          wbwi.singleDelete(newCf, kv3);
+          k3.flip();
+          it.seek(k3);
+          assertThat(it.isValid()).isEqualTo(false);
+
+          // reinsert k3 and make sure we see the new value
+          final byte[] v3Other = "otherValue3".getBytes();
+          wbwi.put(newCf, kv3, v3Other);
+          k3.flip();
+          it.seek(k3);
+          assertThat(it.isValid()).isTrue();
+          assertThat(it.key()).isEqualTo(kv3);
+          assertThat(it.value()).isEqualTo(v3Other);
+        }
+      } finally {
+        for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) {
+          columnFamilyHandle.close();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void readYourOwnWritesCfIterIndirect() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfNames =
+        Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor("new_cf".getBytes()));
+
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+
+    // Test open database with column family names
+    try (final DBOptions options =
+             new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(
+             options, dbFolder.getRoot().getAbsolutePath(), cfNames, columnFamilyHandleList)) {
+      final ColumnFamilyHandle newCf = columnFamilyHandleList.get(1);
+
+      try {
+        final byte[] kv1 =
"key1".getBytes(); + final byte[] vv1 = "value1".getBytes(); + final ByteBuffer k1 = ByteBuffer.allocate(12); + k1.put(kv1).flip(); + final byte[] kv2 = "key2".getBytes(); + final byte[] vv2 = "value2".getBytes(); + final ByteBuffer k2 = ByteBuffer.allocate(12); + k2.put(kv2).flip(); + + db.put(newCf, kv1, vv1); + db.put(newCf, kv2, vv2); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final ReadOptions readOptions = new ReadOptions(); + final RocksIterator base = db.newIterator(newCf, readOptions); + final RocksIterator it = wbwi.newIteratorWithBase(newCf, base, readOptions)) { + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(kv1); + assertThat(it.value()).isEqualTo(vv1); + + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(kv2); + assertThat(it.value()).isEqualTo(vv2); + + // put data to the write batch and make sure we can read it. + final byte[] kv3 = "key3".getBytes(); + final ByteBuffer k3 = ByteBuffer.allocate(12); + k3.put(kv3); + final byte[] vv3 = "value3".getBytes(); + wbwi.put(newCf, kv3, vv3); + k3.flip(); + it.seek(k3); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(kv3); + assertThat(it.value()).isEqualTo(vv3); + + // update k2 in the write batch and check the value + final byte[] v2Other = "otherValue2".getBytes(); + wbwi.put(newCf, kv2, v2Other); + k2.flip(); + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(kv2); + assertThat(it.value()).isEqualTo(v2Other); + + // delete k1 and make sure we can read back the write + wbwi.delete(newCf, kv1); + k1.flip(); + it.seek(k1); + assertThat(it.key()).isNotEqualTo(kv1); + + // reinsert k1 and make sure we see the new value + final byte[] v1Other = "otherValue1".getBytes(); + wbwi.put(newCf, kv1, v1Other); + k1.flip(); + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(kv1); + assertThat(it.value()).isEqualTo(v1Other); + + // single remove k3 and make sure we can read back the write + wbwi.singleDelete(newCf, kv3); + k3.flip(); + it.seek(k3); + assertThat(it.isValid()).isEqualTo(false); + + // reinsert k3 and make sure we see the new value + final byte[] v3Other = "otherValue3".getBytes(); + wbwi.put(newCf, kv3, v3Other); + k3.flip(); + it.seek(k3); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(kv3); + assertThat(it.value()).isEqualTo(v3Other); + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void writeBatchWithIndex() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] k1 = "key1".getBytes(); + final byte[] v1 = "value1".getBytes(); + final byte[] k2 = "key2".getBytes(); + final byte[] v2 = "value2".getBytes(); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(); + final WriteOptions wOpt = new WriteOptions()) { + wbwi.put(k1, v1); + wbwi.put(k2, v2); + + db.write(wOpt, wbwi); + } + + assertThat(db.get(k1)).isEqualTo(v1); + assertThat(db.get(k2)).isEqualTo(v2); + } + } + + @Test + public void write_writeBatchWithIndexDirect() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + final ByteBuffer k1 = 
ByteBuffer.allocateDirect(16); + final ByteBuffer v1 = ByteBuffer.allocateDirect(16); + final ByteBuffer k2 = ByteBuffer.allocateDirect(16); + final ByteBuffer v2 = ByteBuffer.allocateDirect(16); + k1.put("key1".getBytes()).flip(); + v1.put("value1".getBytes()).flip(); + k2.put("key2".getBytes()).flip(); + v2.put("value2".getBytes()).flip(); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + wbwi.put(k1, v1); + assertThat(k1.position()).isEqualTo(4); + assertThat(k1.limit()).isEqualTo(4); + assertThat(v1.position()).isEqualTo(6); + assertThat(v1.limit()).isEqualTo(6); + + wbwi.put(k2, v2); + + db.write(new WriteOptions(), wbwi); + } + + assertThat(db.get("key1".getBytes())).isEqualTo("value1".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo("value2".getBytes()); + } + } + + @Test + public void iterator() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) { + + final String k1 = "key1"; + final String v1 = "value1"; + final String k2 = "key2"; + final String v2 = "value2"; + final String k3 = "key3"; + final String v3 = "value3"; + final String k4 = "key4"; + final String k5 = "key5"; + final String v8 = "value8"; + final byte[] k1b = k1.getBytes(UTF_8); + final byte[] v1b = v1.getBytes(UTF_8); + final byte[] k2b = k2.getBytes(UTF_8); + final byte[] v2b = v2.getBytes(UTF_8); + final byte[] k3b = k3.getBytes(UTF_8); + final byte[] v3b = v3.getBytes(UTF_8); + final byte[] k4b = k4.getBytes(UTF_8); + final byte[] k5b = k5.getBytes(UTF_8); + final byte[] v8b = v8.getBytes(UTF_8); + + final String k1point5 = "key1point5"; + final String k2point5 = "key2point5"; + + // add put records + wbwi.put(k1b, v1b); + wbwi.put(k2b, v2b); + wbwi.put(k3b, v3b); + + // add a deletion record + wbwi.delete(k4b); + + // add a single deletion record + wbwi.singleDelete(k5b); + + // add a log record + wbwi.putLogData(v8b); + + final WBWIRocksIterator.WriteEntry[] expected = { + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k1), new DirectSlice(v1)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k2), new DirectSlice(v2)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k3), new DirectSlice(v3)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE, + new DirectSlice(k4), DirectSlice.NONE), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.SINGLE_DELETE, + new DirectSlice(k5), DirectSlice.NONE), + }; + + try (final WBWIRocksIterator it = wbwi.newIterator()) { + //direct access - seek to key offsets + final int[] testOffsets = {2, 0, 3, 4, 1}; + for (final int testOffset : testOffsets) { + final byte[] key = toArray(expected[testOffset].getKey().data()); + + it.seek(key); + assertThat(it.isValid()).isTrue(); + + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[testOffset]); + } + + for (final int testOffset : testOffsets) { + final byte[] key = toArray(expected[testOffset].getKey().data()); + + // Direct buffer seek + final ByteBuffer db = expected[testOffset].getKey().data(); + it.seek(db); + assertThat(db.position()).isEqualTo(key.length); + assertThat(it.isValid()).isTrue(); + + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[testOffset]); + } + + for (final int testOffset : testOffsets) { + final byte[] key = toArray(expected[testOffset].getKey().data()); + + // Direct buffer seek + final ByteBuffer 
db = expected[testOffset].getKey().data(); + it.seekForPrev(db); + assertThat(db.position()).isEqualTo(key.length); + assertThat(it.isValid()).isTrue(); + + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[testOffset]); + } + + for (final int testOffset : testOffsets) { + final byte[] key = toArray(expected[testOffset].getKey().data()); + + // Indirect buffer seek + final ByteBuffer db = ByteBuffer.allocate(key.length); + System.arraycopy(key, 0, db.array(), 0, key.length); + it.seek(db); + assertThat(db.position()).isEqualTo(key.length); + assertThat(it.isValid()).isTrue(); + + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[testOffset]); + } + + for (final int testOffset : testOffsets) { + final byte[] key = toArray(expected[testOffset].getKey().data()); + + // Indirect buffer seek for prev + final ByteBuffer db = ByteBuffer.allocate(key.length); + System.arraycopy(key, 0, db.array(), 0, key.length); + it.seekForPrev(db); + assertThat(db.position()).isEqualTo(key.length); + assertThat(it.isValid()).isTrue(); + + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[testOffset]); + } + + { + it.seekForPrev(k2point5.getBytes()); + assertThat(it.isValid()).isTrue(); + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[1]); + } + + { + it.seekForPrev(k1point5.getBytes()); + assertThat(it.isValid()).isTrue(); + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[0]); + } + + { + final ByteBuffer db = ByteBuffer.allocate(k2point5.length()); + db.put(k2point5.getBytes()); + db.flip(); + it.seekForPrev(db); + assertThat(it.isValid()).isTrue(); + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[1]); + } + + { + final ByteBuffer db = ByteBuffer.allocate(k1point5.length()); + db.put(k1point5.getBytes()); + db.flip(); + it.seekForPrev(db); + assertThat(it.isValid()).isTrue(); + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry).isEqualTo(expected[0]); + } + + //forward iterative access + int i = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + assertThat(it.entry()).isEqualTo(expected[i++]); + } + + //reverse iterative access + i = expected.length - 1; + for (it.seekToLast(); it.isValid(); it.prev()) { + assertThat(it.entry()).isEqualTo(expected[i--]); + } + } + } + } + + @Test + public void zeroByteTests() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) { + final byte[] zeroByteValue = new byte[]{0, 0}; + //add zero byte value + wbwi.put(zeroByteValue, zeroByteValue); + + final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length); + buffer.put(zeroByteValue); + + final WBWIRocksIterator.WriteEntry expected = + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(buffer, zeroByteValue.length), + new DirectSlice(buffer, zeroByteValue.length)); + + try (final WBWIRocksIterator it = wbwi.newIterator()) { + it.seekToFirst(); + final WBWIRocksIterator.WriteEntry actual = it.entry(); + assertThat(actual.equals(expected)).isTrue(); + assertThat(it.entry().hashCode() == expected.hashCode()).isTrue(); + } + } + } + + @Test + public void savePoints() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) 
{ + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final ReadOptions readOptions = new ReadOptions()) { + wbwi.put("k1".getBytes(), "v1".getBytes()); + wbwi.put("k2".getBytes(), "v2".getBytes()); + wbwi.put("k3".getBytes(), "v3".getBytes()); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1")) + .isEqualTo("v1"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isEqualTo("v2"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3"); + + + wbwi.setSavePoint(); + + wbwi.delete("k2".getBytes()); + wbwi.put("k3".getBytes(), "v3-2".getBytes()); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isNull(); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3-2"); + + + wbwi.setSavePoint(); + + wbwi.put("k3".getBytes(), "v3-3".getBytes()); + wbwi.put("k4".getBytes(), "v4".getBytes()); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3-3"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4")) + .isEqualTo("v4"); + + + wbwi.rollbackToSavePoint(); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isNull(); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3-2"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4")) + .isNull(); + + + wbwi.rollbackToSavePoint(); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1")) + .isEqualTo("v1"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isEqualTo("v2"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4")) + .isNull(); + } + } + } + + @Test + public void restorePoints() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + + wbwi.put("k1".getBytes(UTF_8), "v1".getBytes(UTF_8)); + wbwi.put("k2".getBytes(UTF_8), "v2".getBytes(UTF_8)); + + wbwi.setSavePoint(); + + wbwi.put("k1".getBytes(UTF_8), "123456789".getBytes(UTF_8)); + wbwi.delete("k2".getBytes(UTF_8)); + + wbwi.rollbackToSavePoint(); + + try(final DBOptions options = new DBOptions()) { + assertThat(wbwi.getFromBatch(options,"k1".getBytes(UTF_8))).isEqualTo("v1".getBytes()); + assertThat(wbwi.getFromBatch(options,"k2".getBytes(UTF_8))).isEqualTo("v2".getBytes()); + } + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + wbwi.rollbackToSavePoint(); + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints_nested() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + + wbwi.setSavePoint(); + wbwi.rollbackToSavePoint(); + + // without previous corresponding setSavePoint + wbwi.rollbackToSavePoint(); + } + } + + @Test + public void popSavePoint() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + + wbwi.put("k1".getBytes(), "v1".getBytes()); + wbwi.put("k2".getBytes(), "v2".getBytes()); + + wbwi.setSavePoint(); + + wbwi.put("k1".getBytes(), "123456789".getBytes()); + wbwi.delete("k2".getBytes()); + + wbwi.setSavePoint(); + + wbwi.popSavePoint(); + + wbwi.rollbackToSavePoint(); + + try(final DBOptions options = new DBOptions()) { + 
assertThat(wbwi.getFromBatch(options,"k1".getBytes(UTF_8))).isEqualTo("v1".getBytes()); + assertThat(wbwi.getFromBatch(options,"k2".getBytes(UTF_8))).isEqualTo("v2".getBytes()); + } + } + } + + @Test(expected = RocksDBException.class) + public void popSavePoint_withoutSavePoints() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + wbwi.popSavePoint(); + } + } + + @Test(expected = RocksDBException.class) + public void popSavePoint_withoutSavePoints_nested() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + + wbwi.setSavePoint(); + wbwi.popSavePoint(); + + // without previous corresponding setSavePoint + wbwi.popSavePoint(); + } + } + + @Test + public void maxBytes() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + wbwi.setMaxBytes(19); + + wbwi.put("k1".getBytes(), "v1".getBytes()); + } + } + + @Test(expected = RocksDBException.class) + public void maxBytes_over() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + wbwi.setMaxBytes(1); + + wbwi.put("k1".getBytes(), "v1".getBytes()); + } + } + + @Test + public void getWriteBatch() { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + + final WriteBatch wb = wbwi.getWriteBatch(); + assertThat(wb).isNotNull(); + assertThat(wb.isOwningHandle()).isFalse(); + } + } + + private static String getFromWriteBatchWithIndex(final RocksDB db, + final ReadOptions readOptions, final WriteBatchWithIndex wbwi, + final String skey) { + final byte[] key = skey.getBytes(); + try (final RocksIterator baseIterator = db.newIterator(readOptions); + final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) { + iterator.seek(key); + + // Arrays.equals(key, iterator.key()) ensures an exact match in Rocks, + // instead of a nearest match + return iterator.isValid() && + Arrays.equals(key, iterator.key()) ? 
+ new String(iterator.value()) : null; + } + } + + @Test + public void getFromBatch() throws RocksDBException { + final byte[] k1 = "k1".getBytes(); + final byte[] k2 = "k2".getBytes(); + final byte[] k3 = "k3".getBytes(); + final byte[] k4 = "k4".getBytes(); + + final byte[] v1 = "v1".getBytes(); + final byte[] v2 = "v2".getBytes(); + final byte[] v3 = "v3".getBytes(); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final DBOptions dbOptions = new DBOptions()) { + wbwi.put(k1, v1); + wbwi.put(k2, v2); + wbwi.put(k3, v3); + + assertThat(wbwi.getFromBatch(dbOptions, k1)).isEqualTo(v1); + assertThat(wbwi.getFromBatch(dbOptions, k2)).isEqualTo(v2); + assertThat(wbwi.getFromBatch(dbOptions, k3)).isEqualTo(v3); + assertThat(wbwi.getFromBatch(dbOptions, k4)).isNull(); + + wbwi.delete(k2); + + assertThat(wbwi.getFromBatch(dbOptions, k2)).isNull(); + } + } + + @Test + public void getFromBatchAndDB() throws RocksDBException { + final byte[] k1 = "k1".getBytes(); + final byte[] k2 = "k2".getBytes(); + final byte[] k3 = "k3".getBytes(); + final byte[] k4 = "k4".getBytes(); + + final byte[] v1 = "v1".getBytes(); + final byte[] v2 = "v2".getBytes(); + final byte[] v3 = "v3".getBytes(); + final byte[] v4 = "v4".getBytes(); + + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + db.put(k1, v1); + db.put(k2, v2); + db.put(k4, v4); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final DBOptions dbOptions = new DBOptions(); + final ReadOptions readOptions = new ReadOptions()) { + + assertThat(wbwi.getFromBatch(dbOptions, k1)).isNull(); + assertThat(wbwi.getFromBatch(dbOptions, k2)).isNull(); + assertThat(wbwi.getFromBatch(dbOptions, k4)).isNull(); + + wbwi.put(k3, v3); + + assertThat(wbwi.getFromBatch(dbOptions, k3)).isEqualTo(v3); + + assertThat(wbwi.getFromBatchAndDB(db, readOptions, k1)).isEqualTo(v1); + assertThat(wbwi.getFromBatchAndDB(db, readOptions, k2)).isEqualTo(v2); + assertThat(wbwi.getFromBatchAndDB(db, readOptions, k3)).isEqualTo(v3); + assertThat(wbwi.getFromBatchAndDB(db, readOptions, k4)).isEqualTo(v4); + + wbwi.delete(k4); + + assertThat(wbwi.getFromBatchAndDB(db, readOptions, k4)).isNull(); + } + } + } + private byte[] toArray(final ByteBuffer buf) { + final byte[] ary = new byte[buf.remaining()]; + buf.get(ary); + return ary; + } + + @Test + public void deleteRange() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteBatch batch = new WriteBatch(); + final WriteOptions wOpt = new WriteOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + db.put("key3".getBytes(), "abcdefg".getBytes()); + db.put("key4".getBytes(), "xyz".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo("12345678".getBytes()); + assertThat(db.get("key3".getBytes())).isEqualTo("abcdefg".getBytes()); + assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes()); + + batch.deleteRange("key2".getBytes(), "key4".getBytes()); + db.write(wOpt, batch); + + assertThat(db.get("key1".getBytes())).isEqualTo("value".getBytes()); + assertThat(db.get("key2".getBytes())).isNull(); + assertThat(db.get("key3".getBytes())).isNull(); + assertThat(db.get("key4".getBytes())).isEqualTo("xyz".getBytes()); + } + } + + @Test + public void iteratorWithBaseOverwriteTrue() throws 
RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+           final RocksIterator baseIter = db.newIterator();
+           final RocksIterator wbwiIter = wbwi.newIteratorWithBase(baseIter)) {
+        assertThat(wbwiIter).isNotNull();
+        assertThat(wbwiIter.nativeHandle_).isGreaterThan(0);
+        wbwiIter.status();
+      }
+
+      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+           final RocksIterator baseIter = db.newIterator();
+           final ReadOptions readOptions = new ReadOptions();
+           final RocksIterator wbwiIter = wbwi.newIteratorWithBase(baseIter, readOptions)) {
+        assertThat(wbwiIter).isNotNull();
+        assertThat(wbwiIter.nativeHandle_).isGreaterThan(0);
+        wbwiIter.status();
+      }
+    }
+
+    final List<ColumnFamilyDescriptor> cfNames =
+        Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options =
+             new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(
+             options, dbFolder.getRoot().getAbsolutePath(), cfNames, columnFamilyHandleList)) {
+      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+           final RocksIterator baseIter = db.newIterator();
+           final RocksIterator wbwiIter =
+               wbwi.newIteratorWithBase(columnFamilyHandleList.get(1), baseIter)) {
+        assertThat(wbwiIter).isNotNull();
+        assertThat(wbwiIter.nativeHandle_).isGreaterThan(0);
+        wbwiIter.status();
+      }
+
+      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+           final RocksIterator baseIter = db.newIterator();
+           final ReadOptions readOptions = new ReadOptions();
+           final RocksIterator wbwiIter =
+               wbwi.newIteratorWithBase(columnFamilyHandleList.get(1), baseIter, readOptions)) {
+        assertThat(wbwiIter).isNotNull();
+        assertThat(wbwiIter.nativeHandle_).isGreaterThan(0);
+        wbwiIter.status();
+      }
+    }
+  }
+
+  @Test
+  public void iteratorWithBaseOverwriteFalse() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
+      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(false);
+           final RocksIterator baseIter = db.newIterator();
+           final RocksIterator wbwiIter = wbwi.newIteratorWithBase(baseIter)) {
+        assertThat(wbwiIter).isNotNull();
+        assertThat(wbwiIter.nativeHandle_).isGreaterThan(0);
+        wbwiIter.status();
+      }
+
+      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(false);
+           final RocksIterator baseIter = db.newIterator();
+           final ReadOptions readOptions = new ReadOptions();
+           final RocksIterator wbwiIter = wbwi.newIteratorWithBase(baseIter, readOptions)) {
+        assertThat(wbwiIter).isNotNull();
+        assertThat(wbwiIter.nativeHandle_).isGreaterThan(0);
+        wbwiIter.status();
+      }
+    }
+
+    final List<ColumnFamilyDescriptor> cfNames =
+        Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+            new ColumnFamilyDescriptor("new_cf".getBytes()));
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options =
+             new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(
+             options, dbFolder.getRoot().getAbsolutePath(), cfNames, columnFamilyHandleList)) {
+      try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(false);
+           final RocksIterator baseIter =
db.newIterator(); + final RocksIterator wbwiIter = + wbwi.newIteratorWithBase(columnFamilyHandleList.get(1), baseIter)) { + assertThat(wbwiIter).isNotNull(); + assertThat(wbwiIter.nativeHandle_).isGreaterThan(0); + wbwiIter.status(); + } + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(false); + final RocksIterator baseIter = db.newIterator(); + final ReadOptions readOptions = new ReadOptions(); + final RocksIterator wbwiIter = + wbwi.newIteratorWithBase(columnFamilyHandleList.get(1), baseIter, readOptions)) { + assertThat(wbwiIter).isNotNull(); + assertThat(wbwiIter.nativeHandle_).isGreaterThan(0); + wbwiIter.status(); + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java new file mode 100644 index 000000000..735677cb7 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java @@ -0,0 +1,75 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.Random; +import org.junit.ClassRule; +import org.junit.Test; + +public class WriteOptionsTest { + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + public static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void writeOptions() { + try (final WriteOptions writeOptions = new WriteOptions()) { + + writeOptions.setSync(true); + assertThat(writeOptions.sync()).isTrue(); + writeOptions.setSync(false); + assertThat(writeOptions.sync()).isFalse(); + + writeOptions.setDisableWAL(true); + assertThat(writeOptions.disableWAL()).isTrue(); + writeOptions.setDisableWAL(false); + assertThat(writeOptions.disableWAL()).isFalse(); + + + writeOptions.setIgnoreMissingColumnFamilies(true); + assertThat(writeOptions.ignoreMissingColumnFamilies()).isTrue(); + writeOptions.setIgnoreMissingColumnFamilies(false); + assertThat(writeOptions.ignoreMissingColumnFamilies()).isFalse(); + + writeOptions.setNoSlowdown(true); + assertThat(writeOptions.noSlowdown()).isTrue(); + writeOptions.setNoSlowdown(false); + assertThat(writeOptions.noSlowdown()).isFalse(); + + writeOptions.setLowPri(true); + assertThat(writeOptions.lowPri()).isTrue(); + writeOptions.setLowPri(false); + assertThat(writeOptions.lowPri()).isFalse(); + + writeOptions.setMemtableInsertHintPerBatch(true); + assertThat(writeOptions.memtableInsertHintPerBatch()).isTrue(); + writeOptions.setMemtableInsertHintPerBatch(false); + assertThat(writeOptions.memtableInsertHintPerBatch()).isFalse(); + } + } + + @Test + public void copyConstructor() { + WriteOptions origOpts = new WriteOptions(); + origOpts.setDisableWAL(rand.nextBoolean()); + origOpts.setIgnoreMissingColumnFamilies(rand.nextBoolean()); + origOpts.setSync(rand.nextBoolean()); + origOpts.setMemtableInsertHintPerBatch(true); + WriteOptions copyOpts = new WriteOptions(origOpts); + assertThat(origOpts.disableWAL()).isEqualTo(copyOpts.disableWAL()); + assertThat(origOpts.ignoreMissingColumnFamilies()).isEqualTo( + copyOpts.ignoreMissingColumnFamilies()); + assertThat(origOpts.sync()).isEqualTo(copyOpts.sync()); + assertThat(origOpts.memtableInsertHintPerBatch()) + 
.isEqualTo(copyOpts.memtableInsertHintPerBatch());
+  }
+}
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java b/src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java
new file mode 100644
index 000000000..c4e4f25a0
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/test/RemoveEmptyValueCompactionFilterFactory.java
@@ -0,0 +1,21 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb.test;
+
+import org.rocksdb.AbstractCompactionFilter;
+import org.rocksdb.AbstractCompactionFilterFactory;
+import org.rocksdb.RemoveEmptyValueCompactionFilter;
+
+/**
+ * Simple CompactionFilterFactory class used in tests. Generates RemoveEmptyValueCompactionFilters.
+ */
+public class RemoveEmptyValueCompactionFilterFactory extends AbstractCompactionFilterFactory<RemoveEmptyValueCompactionFilter> {
+  @Override
+  public RemoveEmptyValueCompactionFilter createCompactionFilter(final AbstractCompactionFilter.Context context) {
+    return new RemoveEmptyValueCompactionFilter();
+  }
+
+  @Override
+  public String name() {
+    return "RemoveEmptyValueCompactionFilterFactory";
+  }
+}
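The factory above is what RocksDB's compaction machinery calls back into whenever it needs a filter instance. As a hedged sketch only (the setCompactionFilterFactory setter and the wiring shown here are assumptions for illustration; only the factory class itself appears in this patch):

    import org.rocksdb.Options;
    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.test.RemoveEmptyValueCompactionFilterFactory;

    public final class FactoryWiringSketch {
      public static void main(final String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        final RemoveEmptyValueCompactionFilterFactory factory =
            new RemoveEmptyValueCompactionFilterFactory();
        try (final Options options = new Options()
                 .setCreateIfMissing(true)
                 .setCompactionFilterFactory(factory); // assumed setter, see note above
             final RocksDB db = RocksDB.open(options, "/tmp/example-db")) {
          // an empty value becomes a removal candidate at compaction time
          db.put("emptied".getBytes(), new byte[0]);
        }
      }
    }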
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java b/src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
new file mode 100644
index 000000000..42d3148ef
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java
@@ -0,0 +1,174 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+package org.rocksdb.test;
+
+import org.junit.internal.JUnitSystem;
+import org.junit.internal.RealSystem;
+import org.junit.internal.TextListener;
+import org.junit.runner.Description;
+import org.junit.runner.JUnitCore;
+import org.junit.runner.Result;
+import org.junit.runner.notification.Failure;
+import org.rocksdb.RocksDB;
+
+import java.io.PrintStream;
+import java.text.DecimalFormat;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.rocksdb.test.RocksJunitRunner.RocksJunitListener.Status.*;
+
+/**
+ * Custom JUnit Runner which also prints the test classes
+ * and executed methods to the command prompt.
+ */
+public class RocksJunitRunner {
+
+  /**
+   * Listener which overrides the default functionality
+   * to print class and method names to System.out.
+   */
+  static class RocksJunitListener extends TextListener {
+
+    private final static NumberFormat secsFormat =
+        new DecimalFormat("###,###.###");
+
+    private final PrintStream writer;
+
+    private String currentClassName = null;
+    private String currentMethodName = null;
+    private Status currentStatus = null;
+    private long currentTestsStartTime;
+    private int currentTestsCount = 0;
+    private int currentTestsIgnoredCount = 0;
+    private int currentTestsFailureCount = 0;
+    private int currentTestsErrorCount = 0;
+
+    enum Status {
+      IGNORED,
+      FAILURE,
+      ERROR,
+      OK
+    }
+
+    /**
+     * RocksJunitListener constructor
+     *
+     * @param system JUnitSystem
+     */
+    public RocksJunitListener(final JUnitSystem system) {
+      this(system.out());
+    }
+
+    public RocksJunitListener(final PrintStream writer) {
+      super(writer);
+      this.writer = writer;
+    }
+
+    @Override
+    public void testRunStarted(final Description description) {
+      writer.format("Starting RocksJava Tests...%n");
+    }
+
+    @Override
+    public void testStarted(final Description description) {
+      if (currentClassName == null
+          || !currentClassName.equals(description.getClassName())) {
+        if (currentClassName != null) {
+          printTestsSummary();
+        } else {
+          currentTestsStartTime = System.currentTimeMillis();
+        }
+        writer.format("%nRunning: %s%n", description.getClassName());
+        currentClassName = description.getClassName();
+      }
+      currentMethodName = description.getMethodName();
+      currentStatus = OK;
+      currentTestsCount++;
+    }
+
+    private void printTestsSummary() {
+      // print summary of last test set
+      writer.format("Tests run: %d, Failures: %d, Errors: %d, Ignored: %d, Time elapsed: %s sec%n",
+          currentTestsCount,
+          currentTestsFailureCount,
+          currentTestsErrorCount,
+          currentTestsIgnoredCount,
+          formatSecs(System.currentTimeMillis() - currentTestsStartTime));
+
+      // reset counters
+      currentTestsCount = 0;
+      currentTestsFailureCount = 0;
+      currentTestsErrorCount = 0;
+      currentTestsIgnoredCount = 0;
+      currentTestsStartTime = System.currentTimeMillis();
+    }
+
+    private static String formatSecs(final double milliseconds) {
+      final double seconds = milliseconds / 1000;
+      return secsFormat.format(seconds);
+    }
+
+    @Override
+    public void testFailure(final Failure failure) {
+      if (failure.getException() != null
+          && failure.getException() instanceof AssertionError) {
+        currentStatus = FAILURE;
+        currentTestsFailureCount++;
+      } else {
+        currentStatus = ERROR;
+        currentTestsErrorCount++;
+      }
+    }
+
+    @Override
+    public void testIgnored(final Description description) {
+      currentStatus = IGNORED;
+      currentTestsIgnoredCount++;
+    }
+
+    @Override
+    public void testFinished(final Description description) {
+      if (currentStatus == OK) {
+        writer.format("\t%s OK%n", currentMethodName);
+      } else {
+        writer.format("  [%s] %s%n", currentStatus.name(), currentMethodName);
+      }
+    }
+
+    @Override
+    public void testRunFinished(final Result result) {
+      printTestsSummary();
+      super.testRunFinished(result);
+    }
+  }
+
+  /**
+   * Main method to execute tests
+   *
+   * @param args Test classes as String names
+   */
+  public static void main(final String[] args) {
+    final JUnitCore runner = new JUnitCore();
+    final JUnitSystem system = new RealSystem();
+    runner.addListener(new RocksJunitListener(system));
+    try {
+      final List<Class<?>> classes = new ArrayList<>();
+      for (final String arg : args) {
+        classes.add(Class.forName(arg));
+      }
+      final Class[] clazzes = classes.toArray(new Class[classes.size()]);
+      final Result result = runner.run(clazzes);
+      if (!result.wasSuccessful()) {
+        System.exit(-1);
+      }
+    }
catch (final ClassNotFoundException e) { + e.printStackTrace(); + System.exit(-2); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/test/TestableEventListener.java b/src/rocksdb/java/src/test/java/org/rocksdb/test/TestableEventListener.java new file mode 100644 index 000000000..865ad5cf7 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/test/TestableEventListener.java @@ -0,0 +1,23 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb.test; + +import org.rocksdb.AbstractEventListener; + +public class TestableEventListener extends AbstractEventListener { + public TestableEventListener() { + super(); + } + + public TestableEventListener(final EnabledEventCallback... enabledEventCallbacks) { + super(enabledEventCallbacks); + } + + public void invokeAllCallbacks() { + invokeAllCallbacks(nativeHandle_); + } + + private static native void invokeAllCallbacks(final long handle); +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java new file mode 100644 index 000000000..8d7956cf2 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/ByteBufferAllocator.java @@ -0,0 +1,16 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import java.nio.ByteBuffer; + +public interface ByteBufferAllocator { + ByteBuffer allocate(int capacity); + + ByteBufferAllocator DIRECT = new DirectByteBufferAllocator(); + ByteBufferAllocator HEAP = new HeapByteBufferAllocator(); +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java new file mode 100644 index 000000000..fb7239c92 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorIntTest.java @@ -0,0 +1,267 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.rocksdb.*; + +import java.nio.ByteBuffer; +import java.nio.file.FileSystems; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Similar to {@link IntComparatorTest}, but uses {@link BytewiseComparator} + * which ensures the correct ordering of positive integers. 
+ */
+@RunWith(Parameterized.class)
+public class BytewiseComparatorIntTest {
+
+  // test with 500 random positive integer keys
+  private static final int TOTAL_KEYS = 500;
+  private static final byte[][] keys = new byte[TOTAL_KEYS][4];
+
+  @BeforeClass
+  public static void prepareKeys() {
+    final ByteBuffer buf = ByteBuffer.allocate(4);
+    final Random random = new Random();
+    for (int i = 0; i < TOTAL_KEYS; i++) {
+      final int ri = random.nextInt() & Integer.MAX_VALUE; // the & clears the sign bit, ensuring a non-negative integer
+      buf.putInt(ri);
+      buf.flip();
+      final byte[] key = buf.array();
+
+      // does key already exist (avoid duplicates)
+      if (keyExists(key, i)) {
+        i--; // loop round and generate a different key
+      } else {
+        System.arraycopy(key, 0, keys[i], 0, 4);
+      }
+    }
+  }
+
+  private static boolean keyExists(final byte[] key, final int limit) {
+    for (int j = 0; j < limit; j++) {
+      if (Arrays.equals(key, keys[j])) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Parameters(name = "{0}")
+  public static Iterable<Object[]> parameters() {
+    return Arrays.asList(new Object[][] {
+        { "non-direct_reused64_mutex", false, 64, ReusedSynchronisationType.MUTEX },
+        { "direct_reused64_mutex", true, 64, ReusedSynchronisationType.MUTEX },
+        { "non-direct_reused64_adaptive-mutex", false, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+        { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX },
+        { "non-direct_reused64_thread-local", false, 64, ReusedSynchronisationType.THREAD_LOCAL },
+        { "direct_reused64_thread-local", true, 64, ReusedSynchronisationType.THREAD_LOCAL },
+        { "non-direct_noreuse", false, -1, null },
+        { "direct_noreuse", true, -1, null }
+    });
+  }
+
+  @Parameter(0)
+  public String name;
+
+  @Parameter(1)
+  public boolean useDirectBuffer;
+
+  @Parameter(2)
+  public int maxReusedBufferSize;
+
+  @Parameter(3)
+  public ReusedSynchronisationType reusedSynchronisationType;
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void javaComparatorDefaultCf() throws RocksDBException {
+    try (final ComparatorOptions options = new ComparatorOptions()
+        .setUseDirectBuffer(useDirectBuffer)
+        .setMaxReusedBufferSize(maxReusedBufferSize)
+        // if reusedSynchronisationType == null we assume that maxReusedBufferSize <= 0,
+        // so we just set ADAPTIVE_MUTEX even though it won't be used
+        .setReusedSynchronisationType(reusedSynchronisationType == null
+            ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+         final BytewiseComparator comparator = new BytewiseComparator(options)) {
+
+      // test the round-tripability of keys written and read with the Comparator
+      testRoundtrip(FileSystems.getDefault().getPath(
+          dbFolder.getRoot().getAbsolutePath()), comparator);
+    }
+  }
+
+  @Test
+  public void javaComparatorNamedCf() throws RocksDBException {
+    try (final ComparatorOptions options = new ComparatorOptions()
+        .setUseDirectBuffer(useDirectBuffer)
+        .setMaxReusedBufferSize(maxReusedBufferSize)
+        // if reusedSynchronisationType == null we assume that maxReusedBufferSize <= 0,
+        // so we just set ADAPTIVE_MUTEX even though it won't be used
+        .setReusedSynchronisationType(reusedSynchronisationType == null
+            ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType);
+         final BytewiseComparator comparator = new BytewiseComparator(options)) {
+
+      // test the round-tripability of keys written and read with the Comparator
+      testRoundtripCf(FileSystems.getDefault().getPath(
+          dbFolder.getRoot().getAbsolutePath()), comparator);
+    }
+  }
+
+  /**
+   * Test which stores random keys into the database
+   * using a {@link BytewiseComparator}, then checks
+   * that these keys are read back in ascending order.
+   *
+   * @param db_path A path where we can store database
+   *     files temporarily
+   *
+   * @param comparator the comparator
+   *
+   * @throws RocksDBException if a database error happens.
+   */
+  private void testRoundtrip(final Path db_path,
+      final AbstractComparator comparator) throws RocksDBException {
+    try (final Options opt = new Options()
+        .setCreateIfMissing(true)
+        .setComparator(comparator)) {
+
+      // store TOTAL_KEYS into the db
+      try (final RocksDB db = RocksDB.open(opt, db_path.toString())) {
+        for (int i = 0; i < TOTAL_KEYS; i++) {
+          db.put(keys[i], "value".getBytes(UTF_8));
+        }
+      }
+
+      // re-open db and read from start to end;
+      // integer keys should come back in the ascending
+      // order defined by the bytewise comparator
+      final ByteBuffer key = ByteBuffer.allocate(4);
+      try (final RocksDB db = RocksDB.open(opt, db_path.toString());
+           final RocksIterator it = db.newIterator()) {
+        it.seekToFirst();
+        int lastKey = Integer.MIN_VALUE;
+        int count = 0;
+        for (it.seekToFirst(); it.isValid(); it.next()) {
+          key.put(it.key());
+          key.flip();
+          final int thisKey = key.getInt();
+          key.clear();
+          assertThat(thisKey).isGreaterThan(lastKey);
+          lastKey = thisKey;
+          count++;
+        }
+        assertThat(count).isEqualTo(TOTAL_KEYS);
+      }
+    }
+  }
+
+  /**
+   * Test which stores random keys into a column family
+   * in the database using a {@link BytewiseComparator},
+   * then checks that these keys are read back in
+   * ascending order.
+   *
+   * @param db_path A path where we can store database
+   *     files temporarily
+   *
+   * @param comparator the comparator
+   *
+   * @throws RocksDBException if a database error happens.
+   */
+  private void testRoundtripCf(final Path db_path,
+      final AbstractComparator comparator) throws RocksDBException {
+
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes(),
+            new ColumnFamilyOptions()
+                .setComparator(comparator))
+    );
+
+    final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+    try (final DBOptions opt = new DBOptions()
+        .setCreateIfMissing(true)
+        .setCreateMissingColumnFamilies(true)) {
+
+      try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+          cfDescriptors, cfHandles)) {
+        try {
+          assertThat(cfDescriptors.size()).isEqualTo(2);
+          assertThat(cfHandles.size()).isEqualTo(2);
+
+          for (int i = 0; i < TOTAL_KEYS; i++) {
+            db.put(cfHandles.get(1), keys[i], "value".getBytes(UTF_8));
+          }
+        } finally {
+          for (final ColumnFamilyHandle cfHandle : cfHandles) {
+            cfHandle.close();
+          }
+          cfHandles.clear();
+        }
+      }
+
+      // re-open db and read from start to end;
+      // integer keys should come back in the ascending
+      // order defined by the bytewise comparator
+      final ByteBuffer key = ByteBuffer.allocate(4);
+      try (final RocksDB db = RocksDB.open(opt, db_path.toString(),
+          cfDescriptors, cfHandles);
+           final RocksIterator it = db.newIterator(cfHandles.get(1))) {
+        try {
+          assertThat(cfDescriptors.size()).isEqualTo(2);
+          assertThat(cfHandles.size()).isEqualTo(2);
+
+          it.seekToFirst();
+          int lastKey = Integer.MIN_VALUE;
+          int count = 0;
+          for (it.seekToFirst(); it.isValid(); it.next()) {
+            key.put(it.key());
+            key.flip();
+            final int thisKey = key.getInt();
+            key.clear();
+            assertThat(thisKey).isGreaterThan(lastKey);
+            lastKey = thisKey;
+            count++;
+          }
+
+          assertThat(count).isEqualTo(TOTAL_KEYS);
+
+        } finally {
+          for (final ColumnFamilyHandle cfHandle : cfHandles) {
+            cfHandle.close();
+          }
+          cfHandles.clear();
+          for (final ColumnFamilyDescriptor cfDescriptor : cfDescriptors) {
+            cfDescriptor.getOptions().close();
+          }
+        }
+      }
+    }
+  }
+}
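Why a plain bytewise comparator suffices for the int keys in the test above: ByteBuffer.putInt writes big-endian, and for non-negative values (the test clears the sign bit) unsigned lexicographic byte order coincides with numeric order. A small JDK-only illustration of that property, independent of any RocksDB type:

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public final class BigEndianOrderDemo {
      // Compare two byte arrays the way a bytewise comparator would:
      // unsigned, lexicographic.
      static int compareUnsigned(final byte[] a, final byte[] b) {
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
          final int cmp = Integer.compare(a[i] & 0xff, b[i] & 0xff);
          if (cmp != 0) {
            return cmp;
          }
        }
        return Integer.compare(a.length, b.length);
      }

      static byte[] encode(final int v) {
        return ByteBuffer.allocate(4).putInt(v).array(); // big-endian by default
      }

      public static void main(final String[] args) {
        final int x = 42, y = 1_000_000; // non-negative, as in the test
        // numeric order and byte order agree
        assert Integer.compare(x, y) < 0;
        assert compareUnsigned(encode(x), encode(y)) < 0;
        System.out.println(Arrays.toString(encode(x)) + " < " + Arrays.toString(encode(y)));
      }
    }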
diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
new file mode 100644
index 000000000..69f2c282b
--- /dev/null
+++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java
@@ -0,0 +1,531 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the
+// COPYING file in the root directory) and Apache 2.0 License
+// (found in the LICENSE.Apache file in the root directory).
+
+package org.rocksdb.util;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.rocksdb.*;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.*;
+import java.util.*;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.*;
+import static org.rocksdb.util.ByteUtil.bytes;
+
+/**
+ * This is a direct port of various C++
+ * tests from db/comparator_db_test.cc
+ * and some code to adapt it to RocksJava
+ */
+public class BytewiseComparatorTest {
+
+  @ClassRule
+  public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE =
+      new RocksNativeLibraryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  private List<String> source_strings = Arrays.asList("b", "d", "f", "h", "j", "l");
+  private List<String> interleaving_strings =
+      Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m");
+
+  /**
+   * Open the database using the C++ BytewiseComparatorImpl
+   * and test the results against our Java BytewiseComparator
+   */
+  @Test
+  public void java_vs_cpp_bytewiseComparator()
+      throws IOException, RocksDBException {
+    for (int rand_seed = 301; rand_seed < 306; rand_seed++) {
+      final Path dbDir =
+          FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+      try (final RocksDB db = openDatabase(dbDir,
+          BuiltinComparator.BYTEWISE_COMPARATOR)) {
+
+        final Random rnd = new Random(rand_seed);
+        try (final ComparatorOptions copt2 = new ComparatorOptions()
+            .setUseDirectBuffer(false);
+             final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+          final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+          doRandomIterationTest(
+              db,
+              jComparator,
+              rnd,
+              8, 100, 3
+          );
+        }
+      }
+    }
+  }
+
+  /**
+   * Open the database using the Java BytewiseComparator
+   * and test the results against another Java BytewiseComparator
+   */
+  @Test
+  public void java_vs_java_bytewiseComparator()
+      throws IOException, RocksDBException {
+    for (int rand_seed = 301; rand_seed < 306; rand_seed++) {
+      final Path dbDir =
+          FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+      try (final ComparatorOptions copt = new ComparatorOptions()
+          .setUseDirectBuffer(false);
+           final AbstractComparator comparator = new BytewiseComparator(copt);
+           final RocksDB db = openDatabase(dbDir, comparator)) {
+
+        final Random rnd = new Random(rand_seed);
+        try (final ComparatorOptions copt2 = new ComparatorOptions()
+            .setUseDirectBuffer(false);
+             final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+          final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+          doRandomIterationTest(
+              db,
+              jComparator,
+              rnd,
+              8, 100, 3
+          );
+        }
+      }
+    }
+  }
+
+  /**
+   * Open the database using the C++ BytewiseComparatorImpl
+   * and test the results against our Java DirectBytewiseComparator
+   */
+  @Test
+  public void java_vs_cpp_directBytewiseComparator()
+      throws IOException, RocksDBException {
+    for (int rand_seed = 301; rand_seed < 306; rand_seed++) {
+      final Path dbDir =
+          FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+      try (final RocksDB db = openDatabase(dbDir,
+          BuiltinComparator.BYTEWISE_COMPARATOR)) {
+
+        final Random rnd = new Random(rand_seed);
+        try (final ComparatorOptions copt2 = new ComparatorOptions()
+            .setUseDirectBuffer(true);
+             final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+          final java.util.Comparator<String> jComparator =
+              toJavaComparator(comparator2);
+          doRandomIterationTest(
+              db,
+              jComparator,
+              rnd,
+              8, 100, 3
+          );
+        }
+      }
+    }
+  }
+
+  /**
+   * Open the database using the Java DirectBytewiseComparator
+   * and test the results against another Java DirectBytewiseComparator
+   */
+  @Test
+  public void java_vs_java_directBytewiseComparator()
+      throws IOException, RocksDBException {
+    for (int rand_seed = 301; rand_seed < 306; rand_seed++) {
+      final Path dbDir =
+          FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+      try (final ComparatorOptions copt = new ComparatorOptions()
+          .setUseDirectBuffer(true);
+           final AbstractComparator comparator = new BytewiseComparator(copt);
+           final RocksDB db = openDatabase(dbDir, comparator)) {
+
+        final Random rnd = new Random(rand_seed);
+        try (final ComparatorOptions copt2 = new ComparatorOptions()
+            .setUseDirectBuffer(true);
+             final AbstractComparator comparator2 = new BytewiseComparator(copt2)) {
+          final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+          doRandomIterationTest(
+              db,
+              jComparator,
+              rnd,
+              8, 100, 3
+          );
+        }
+      }
+    }
+  }
+
+  /**
+   * Open the database using the C++ ReverseBytewiseComparatorImpl
+   * and test the results against our Java ReverseBytewiseComparator
+   */
+  @Test
+  public void java_vs_cpp_reverseBytewiseComparator()
+      throws IOException, RocksDBException {
+    for (int rand_seed = 301; rand_seed < 306; rand_seed++) {
+      final Path dbDir =
+          FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+      try (final RocksDB db = openDatabase(dbDir,
+          BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) {
+
+        final Random rnd = new Random(rand_seed);
+        try (final ComparatorOptions copt2 = new ComparatorOptions()
+            .setUseDirectBuffer(false);
+             final AbstractComparator comparator2 = new ReverseBytewiseComparator(copt2)) {
+          final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+          doRandomIterationTest(
+              db,
+              jComparator,
+              rnd,
+              8, 100, 3
+          );
+        }
+      }
+    }
+  }
+
+  /**
+   * Open the database using the Java ReverseBytewiseComparator
+   * and test the results against another Java ReverseBytewiseComparator
+   */
+  @Test
+  public void java_vs_java_reverseBytewiseComparator()
+      throws IOException, RocksDBException {
+    for (int rand_seed = 301; rand_seed < 306; rand_seed++) {
+      final Path dbDir =
+          FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath());
+      try (final ComparatorOptions copt = new ComparatorOptions()
+          .setUseDirectBuffer(false);
+           final AbstractComparator comparator = new ReverseBytewiseComparator(copt);
+           final RocksDB db = openDatabase(dbDir, comparator)) {
+
+        final Random rnd = new Random(rand_seed);
+        try (final ComparatorOptions copt2 = new ComparatorOptions()
+            .setUseDirectBuffer(false);
+             final AbstractComparator comparator2 = new ReverseBytewiseComparator(copt2)) {
+          final java.util.Comparator<String> jComparator = toJavaComparator(comparator2);
+          doRandomIterationTest(
+              db,
+              jComparator,
+              rnd,
+              8, 100, 3
+          );
+        }
+      }
+    }
+  }
+
+  private void doRandomIterationTest(
+      final RocksDB db, final java.util.Comparator<String> javaComparator,
+      final Random rnd,
+      final int num_writes, final int num_iter_ops,
+      final int num_trigger_flush) throws RocksDBException {
+
+    final TreeMap<String, String> map = new TreeMap<>(javaComparator);
+
+    try (final FlushOptions flushOptions = new FlushOptions();
+         final WriteOptions writeOptions = new WriteOptions()) {
+      for (int i = 0; i < num_writes; i++) {
+        if (num_trigger_flush > 0 && i != 0 && i % num_trigger_flush == 0) {
+          db.flush(flushOptions);
+        }
+
+        final int type = rnd.nextInt(2);
+        final int index = rnd.nextInt(source_strings.size());
+        final String key = source_strings.get(index);
+        switch (type) {
+          case 0:
+            // put
+            map.put(key, key);
+            db.put(writeOptions, bytes(key), bytes(key));
+            break;
+          case 1:
+            // delete
+            if (map.containsKey(key)) {
+              map.remove(key);
+            }
+            db.delete(writeOptions, bytes(key));
+            break;
+
+          default:
+            fail("Should not be able to generate random outside range 0..1");
+        }
+      }
+    }
+
+    try (final ReadOptions readOptions = new ReadOptions();
+         final RocksIterator iter = db.newIterator(readOptions)) {
+      final KVIter<String, String> result_iter = new KVIter<>(map);
+
+      boolean is_valid = false;
+      for (int i = 0; i < num_iter_ops; i++) {
+        // Random walk and make sure iter and result_iter return the
+        // same key and value
+        final int type = rnd.nextInt(8);
+        iter.status();
+        switch (type) {
+          case 0:
+            // Seek to First
+            iter.seekToFirst();
+            result_iter.seekToFirst();
+            break;
+          case 1:
+            // Seek to last
+            iter.seekToLast();
+            result_iter.seekToLast();
+            break;
+          case 2: {
+            // Seek to random (existing or non-existing) key
+            final int key_idx = rnd.nextInt(interleaving_strings.size());
+            final String key = interleaving_strings.get(key_idx);
+            iter.seek(bytes(key));
+            result_iter.seek(bytes(key));
+            break;
+          }
+          case 3: {
+            // SeekForPrev to random (existing or non-existing) key
+            final int key_idx = rnd.nextInt(interleaving_strings.size());
+            final String key = interleaving_strings.get(key_idx);
+            iter.seekForPrev(bytes(key));
+            result_iter.seekForPrev(bytes(key));
+            break;
+          }
+          case 4:
+            // Next
+            if (is_valid) {
+              iter.next();
+              result_iter.next();
+            } else {
+              continue;
+            }
+            break;
+          case 5:
+            // Prev
+            if (is_valid) {
+              iter.prev();
+              result_iter.prev();
+            } else {
+              continue;
+            }
+            break;
+          case 6:
+            // Refresh
+            iter.refresh();
+            result_iter.refresh();
+            iter.seekToFirst();
+            result_iter.seekToFirst();
+            break;
+          default: {
+            assert (type == 7);
+            final int key_idx = rnd.nextInt(source_strings.size());
+            final String key = source_strings.get(key_idx);
+            final byte[] result = db.get(readOptions, bytes(key));
+            if (!map.containsKey(key)) {
+              assertNull(result);
+            } else {
+              assertArrayEquals(bytes(map.get(key)), result);
+            }
+            break;
+          }
+        }
+
+        assertEquals(result_iter.isValid(), iter.isValid());
+
+        is_valid = iter.isValid();
+
+        if (is_valid) {
+          assertArrayEquals(bytes(result_iter.key()), iter.key());
+
+          // note that calling value on a non-valid iterator from the Java API
+          // results in a SIGSEGV
+          assertArrayEquals(bytes(result_iter.value()), iter.value());
+        }
+      }
+    }
+  }
+
+  /**
+   * Open the database using a C++ Comparator
+   */
+  private RocksDB openDatabase(
+      final Path dbDir, final BuiltinComparator cppComparator)
+      throws IOException, RocksDBException {
+    final Options options = new Options()
+        .setCreateIfMissing(true)
+        .setComparator(cppComparator);
+    return RocksDB.open(options, dbDir.toAbsolutePath().toString());
+  }
+
+  /**
+   * Open the database using a Java Comparator
+   */
+  private RocksDB openDatabase(
+      final Path dbDir,
+      final AbstractComparator javaComparator)
+      throws IOException, RocksDBException {
+    final Options options = new Options()
+        .setCreateIfMissing(true)
+        .setComparator(javaComparator);
+    return RocksDB.open(options, dbDir.toAbsolutePath().toString());
+  }
+
+  private java.util.Comparator<String> toJavaComparator(
+      final AbstractComparator rocksComparator) {
+    return new java.util.Comparator<String>() {
+      @Override
+      public int compare(final String s1, final String s2) {
+        final ByteBuffer bufS1;
+ final ByteBuffer bufS2; + if (rocksComparator.usingDirectBuffers()) { + bufS1 = ByteBuffer.allocateDirect(s1.length()); + bufS2 = ByteBuffer.allocateDirect(s2.length()); + } else { + bufS1 = ByteBuffer.allocate(s1.length()); + bufS2 = ByteBuffer.allocate(s2.length()); + } + bufS1.put(bytes(s1)); + bufS1.flip(); + bufS2.put(bytes(s2)); + bufS2.flip(); + return rocksComparator.compare(bufS1, bufS2); + } + }; + } + + private static class KVIter<K, V> implements RocksIteratorInterface { + + private final List<Map.Entry<K, V>> entries; + private final java.util.Comparator<? super K> comparator; + private int offset = -1; + + private int lastPrefixMatchIdx = -1; + private int lastPrefixMatch = 0; + + public KVIter(final TreeMap<K, V> map) { + this.entries = new ArrayList<>(); + entries.addAll(map.entrySet()); + this.comparator = map.comparator(); + } + + + @Override + public boolean isValid() { + return offset > -1 && offset < entries.size(); + } + + @Override + public void seekToFirst() { + offset = 0; + } + + @Override + public void seekToLast() { + offset = entries.size() - 1; + } + + @SuppressWarnings("unchecked") + @Override + public void seek(final byte[] target) { + for(offset = 0; offset < entries.size(); offset++) { + if(comparator.compare(entries.get(offset).getKey(), + (K)new String(target, UTF_8)) >= 0) { + return; + } + } + } + + @SuppressWarnings("unchecked") + @Override + public void seekForPrev(final byte[] target) { + for(offset = entries.size()-1; offset >= 0; offset--) { + if(comparator.compare(entries.get(offset).getKey(), + (K)new String(target, UTF_8)) <= 0) { + return; + } + } + } + + /** + * Is `a` a prefix of `b` + * + * @return The length of the matching prefix, or 0 if it is not a prefix + */ + private int isPrefix(final byte[] a, final byte[] b) { + if(b.length >= a.length) { + for(int i = 0; i < a.length; i++) { + if(a[i] != b[i]) { + return i; + } + } + return a.length; + } else { + return 0; + } + } + + @Override + public void next() { + if(offset < entries.size()) { + offset++; + } + } + + @Override + public void prev() { + if(offset >= 0) { + offset--; + } + } + + @Override + public void refresh() throws RocksDBException { + offset = -1; + } + + @Override + public void status() throws RocksDBException { + if(offset < 0 || offset >= entries.size()) { + throw new RocksDBException("Index out of bounds. Size is: " + + entries.size() + ", offset is: " + offset); + } + } + + @SuppressWarnings("unchecked") + public K key() { + if(!isValid()) { + if(entries.isEmpty()) { + return (K)""; + } else if(offset == -1){ + return entries.get(0).getKey(); + } else if(offset == entries.size()) { + return entries.get(offset - 1).getKey(); + } else { + return (K)""; + } + } else { + return entries.get(offset).getKey(); + } + } + + @SuppressWarnings("unchecked") + public V value() { + if(!isValid()) { + return (V)""; + } else { + return entries.get(offset).getValue(); + } + } + + @Override + public void seek(ByteBuffer target) { + throw new IllegalAccessError("Not implemented"); + } + + @Override + public void seekForPrev(ByteBuffer target) { + throw new IllegalAccessError("Not implemented"); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java new file mode 100644 index 000000000..8ea104332 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/CapturingWriteBatchHandler.java @@ -0,0 +1,190 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb.util; + +import org.rocksdb.RocksDBException; +import org.rocksdb.WriteBatch; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A simple WriteBatch Handler which adds a record + * of each event that it receives to a list + */ +public class CapturingWriteBatchHandler extends WriteBatch.Handler { + + private final List<Event> events = new ArrayList<>(); + + /** + * Returns a copy of the current events list + * + * @return a list of the events which have happened up to now + */ + public List<Event> getEvents() { + return new ArrayList<>(events); + } + + @Override + public void put(final int columnFamilyId, final byte[] key, + final byte[] value) { + events.add(new Event(Action.PUT, columnFamilyId, key, value)); + } + + @Override + public void put(final byte[] key, final byte[] value) { + events.add(new Event(Action.PUT, key, value)); + } + + @Override + public void merge(final int columnFamilyId, final byte[] key, + final byte[] value) { + events.add(new Event(Action.MERGE, columnFamilyId, key, value)); + } + + @Override + public void merge(final byte[] key, final byte[] value) { + events.add(new Event(Action.MERGE, key, value)); + } + + @Override + public void delete(final int columnFamilyId, final byte[] key) { + events.add(new Event(Action.DELETE, columnFamilyId, key, (byte[])null)); + } + + @Override + public void delete(final byte[] key) { + events.add(new Event(Action.DELETE, key, (byte[])null)); + } + + @Override + public void singleDelete(final int columnFamilyId, final byte[] key) { + events.add(new Event(Action.SINGLE_DELETE, + columnFamilyId, key, (byte[])null)); + } + + @Override + public void singleDelete(final byte[] key) { + events.add(new Event(Action.SINGLE_DELETE, key, (byte[])null)); + } + + @Override + public void deleteRange(final int columnFamilyId, final byte[] beginKey, + final byte[] endKey) { + events.add(new Event(Action.DELETE_RANGE, columnFamilyId, beginKey, + endKey)); + } + + @Override + public void deleteRange(final byte[] beginKey, final byte[] endKey) { + events.add(new Event(Action.DELETE_RANGE, beginKey, endKey)); + } + + @Override + public void logData(final byte[] blob) { + events.add(new Event(Action.LOG, (byte[])null, blob)); + } + + @Override + public void putBlobIndex(final int columnFamilyId, final byte[] key, + final byte[] value) { + events.add(new Event(Action.PUT_BLOB_INDEX, columnFamilyId, key, value)); + } + + @Override + public void markBeginPrepare() throws RocksDBException { + events.add(new Event(Action.MARK_BEGIN_PREPARE, (byte[])null, + (byte[])null)); + } + + @Override + public void markEndPrepare(final byte[] xid) throws RocksDBException { + events.add(new Event(Action.MARK_END_PREPARE, (byte[])null, + (byte[])null)); + } + + @Override + public void markNoop(final boolean emptyBatch) throws RocksDBException { + events.add(new Event(Action.MARK_NOOP, (byte[])null, (byte[])null)); + } + + @Override + public void markRollback(final byte[] xid) throws RocksDBException { + events.add(new Event(Action.MARK_ROLLBACK, (byte[])null, (byte[])null)); + } + + @Override + public void markCommit(final byte[] xid) throws RocksDBException { + events.add(new Event(Action.MARK_COMMIT, (byte[])null, (byte[])null)); + } + + @Override + public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws RocksDBException { + events.add(new Event(Action.MARK_COMMIT_WITH_TIMESTAMP, (byte[]) null, (byte[]) null)); + } + + public static class Event { + public final Action action; + public final
int columnFamilyId; + public final byte[] key; + public final byte[] value; + + public Event(final Action action, final byte[] key, final byte[] value) { + this(action, 0, key, value); + } + + public Event(final Action action, final int columnFamilyId, final byte[] key, + final byte[] value) { + this.action = action; + this.columnFamilyId = columnFamilyId; + this.key = key; + this.value = value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final Event event = (Event) o; + return columnFamilyId == event.columnFamilyId && + action == event.action && + ((key == null && event.key == null) + || Arrays.equals(key, event.key)) && + ((value == null && event.value == null) + || Arrays.equals(value, event.value)); + } + + @Override + public int hashCode() { + int result = Objects.hash(action, columnFamilyId); + result = 31 * result + Arrays.hashCode(key); + result = 31 * result + Arrays.hashCode(value); + return result; + } + } + + /** + * Enumeration of Write Batch + * event actions + */ + public enum Action { + PUT, + MERGE, + DELETE, + SINGLE_DELETE, + DELETE_RANGE, + LOG, + PUT_BLOB_INDEX, + MARK_BEGIN_PREPARE, + MARK_END_PREPARE, + MARK_NOOP, + MARK_COMMIT, + MARK_ROLLBACK, + MARK_COMMIT_WITH_TIMESTAMP + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java new file mode 100644 index 000000000..d26fb578b --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/DirectByteBufferAllocator.java @@ -0,0 +1,18 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import java.nio.ByteBuffer; + +public final class DirectByteBufferAllocator implements ByteBufferAllocator { + DirectByteBufferAllocator(){}; + + @Override + public ByteBuffer allocate(final int capacity) { + return ByteBuffer.allocateDirect(capacity); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java new file mode 100644 index 000000000..ae340e06d --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java @@ -0,0 +1,304 @@ +// Copyright (c) 2014, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). 
+package org.rocksdb.util; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.hamcrest.Matchers.is; + +import java.lang.reflect.Field; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +public class EnvironmentTest { + private final static String ARCH_FIELD_NAME = "ARCH"; + private final static String OS_FIELD_NAME = "OS"; + + private final static String MUSL_ENVIRONMENT_FIELD_NAME = "MUSL_ENVIRONMENT"; + private final static String MUSL_LIBC_FIELD_NAME = "MUSL_LIBC"; + + private static String INITIAL_OS; + private static String INITIAL_ARCH; + private static String INITIAL_MUSL_ENVIRONMENT; + private static Boolean INITIAL_MUSL_LIBC; + + @BeforeClass + public static void saveState() { + INITIAL_ARCH = getEnvironmentClassField(ARCH_FIELD_NAME); + INITIAL_OS = getEnvironmentClassField(OS_FIELD_NAME); + INITIAL_MUSL_LIBC = getEnvironmentClassField(MUSL_LIBC_FIELD_NAME); + INITIAL_MUSL_ENVIRONMENT = getEnvironmentClassField(MUSL_ENVIRONMENT_FIELD_NAME); + } + + @Test + public void mac32() { + setEnvironmentClassFields("mac", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".jnilib"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-osx.jnilib"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.dylib"); + } + + @Test + public void mac64_x86_64() { + setEnvironmentClassFields("mac", "x86_64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".jnilib"); + assertThat(Environment.getJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-osx-x86_64.jnilib"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-osx.jnilib"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.dylib"); + } + + @Test + public void macAarch64() { + setEnvironmentClassFields("mac", "aarch64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()).isEqualTo(".jnilib"); + assertThat(Environment.getJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-osx-arm64.jnilib"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-osx.jnilib"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.dylib"); + } + + @Test + public void nix32() { + // Linux + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + setEnvironmentClassFields("Linux", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux32.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // Linux musl-libc (Alpine) + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux32-musl.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). 
+ isEqualTo("librocksdbjni.so"); + // UNIX + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + setEnvironmentClassFields("Unix", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux32.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + } + + @Test(expected = UnsupportedOperationException.class) + public void aix32() { + // AIX + setEnvironmentClassFields("aix", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")).isEqualTo("blah"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + } + + @Test + public void nix64() { + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + setEnvironmentClassFields("Linux", "x64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux64.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // Linux musl-libc (Alpine) + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux64-musl.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // UNIX + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + setEnvironmentClassFields("Unix", "x64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux64.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // AIX + setEnvironmentClassFields("aix", "x64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-aix64.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + } + + @Test + public void detectWindows(){ + setEnvironmentClassFields("win", "x64"); + assertThat(Environment.isWindows()).isTrue(); + } + + @Test + public void win64() { + setEnvironmentClassFields("win", "x64"); + assertThat(Environment.isWindows()).isTrue(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".dll"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-win64.dll"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). 
+ isEqualTo("librocksdbjni.dll"); + } + + @Test + public void ppc64le() { + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + setEnvironmentClassFields("Linux", "ppc64le"); + assertThat(Environment.isUnix()).isTrue(); + assertThat(Environment.isPowerPC()).isTrue(); + assertThat(Environment.is64Bit()).isTrue(); + assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so"); + assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni"); + assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-ppc64le"); + assertThat(Environment.getJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-linux-ppc64le.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so"); + // Linux musl-libc (Alpine) + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true); + setEnvironmentClassFields("Linux", "ppc64le"); + assertThat(Environment.isUnix()).isTrue(); + assertThat(Environment.isPowerPC()).isTrue(); + assertThat(Environment.is64Bit()).isTrue(); + assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so"); + assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni"); + assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-ppc64le-musl"); + assertThat(Environment.getJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-linux-ppc64le-musl.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so"); + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + } + + @Test + public void linuxArch64() { + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + setEnvironmentClassFields("Linux", "aarch64"); + assertThat(Environment.isUnix()).isTrue(); + assertThat(Environment.isAarch64()).isTrue(); + assertThat(Environment.is64Bit()).isTrue(); + assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so"); + assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni"); + assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-aarch64"); + assertThat(Environment.getJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-linux-aarch64.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so"); + // Linux musl-libc (Alpine) + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, true); + setEnvironmentClassFields("Linux", "aarch64"); + assertThat(Environment.isUnix()).isTrue(); + assertThat(Environment.isAarch64()).isTrue(); + assertThat(Environment.is64Bit()).isTrue(); + assertThat(Environment.getJniLibraryExtension()).isEqualTo(".so"); + assertThat(Environment.getSharedLibraryName("rocksdb")).isEqualTo("rocksdbjni"); + assertThat(Environment.getJniLibraryName("rocksdb")).isEqualTo("rocksdbjni-linux-aarch64-musl"); + assertThat(Environment.getJniLibraryFileName("rocksdb")) + .isEqualTo("librocksdbjni-linux-aarch64-musl.so"); + assertThat(Environment.getFallbackJniLibraryFileName("rocksdb")).isNull(); + assertThat(Environment.getSharedLibraryFileName("rocksdb")).isEqualTo("librocksdbjni.so"); + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, false); + } + + @Test + public void resolveIsMuslLibc() { + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, null); + setEnvironmentClassFields("win", "anyarch"); + 
assertThat(Environment.isUnix()).isFalse(); + + // with user input, this will resolve to true if set as true, even on OSs that + // appear absurd for musl. The user's choice prevails. + assertThat(Environment.initIsMuslLibc()).isFalse(); + setEnvironmentClassField(MUSL_ENVIRONMENT_FIELD_NAME, "true"); + assertThat(Environment.initIsMuslLibc()).isTrue(); + setEnvironmentClassField(MUSL_ENVIRONMENT_FIELD_NAME, "false"); + assertThat(Environment.initIsMuslLibc()).isFalse(); + } + + private void setEnvironmentClassFields(String osName, + String osArch) { + setEnvironmentClassField(OS_FIELD_NAME, osName); + setEnvironmentClassField(ARCH_FIELD_NAME, osArch); + } + + @AfterClass + public static void restoreState() { + setEnvironmentClassField(OS_FIELD_NAME, INITIAL_OS); + setEnvironmentClassField(ARCH_FIELD_NAME, INITIAL_ARCH); + setEnvironmentClassField(MUSL_ENVIRONMENT_FIELD_NAME, INITIAL_MUSL_ENVIRONMENT); + setEnvironmentClassField(MUSL_LIBC_FIELD_NAME, INITIAL_MUSL_LIBC); + } + + @SuppressWarnings("unchecked") + private static <T> T getEnvironmentClassField(String fieldName) { + final Field field; + try { + field = Environment.class.getDeclaredField(fieldName); + field.setAccessible(true); + /* Fails in JDK 13; and not needed unless fields are final + final Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + */ + return (T)field.get(null); + } catch (final NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private static void setEnvironmentClassField(String fieldName, Object value) { + final Field field; + try { + field = Environment.class.getDeclaredField(fieldName); + field.setAccessible(true); + /* Fails in JDK 13; and not needed unless fields are final + final Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + */ + field.set(null, value); + } catch (final NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java new file mode 100644 index 000000000..ad6b8f6f4 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/HeapByteBufferAllocator.java @@ -0,0 +1,18 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import java.nio.ByteBuffer; + +public final class HeapByteBufferAllocator implements ByteBufferAllocator { + HeapByteBufferAllocator(){}; + + @Override + public ByteBuffer allocate(final int capacity) { + return ByteBuffer.allocate(capacity); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java new file mode 100644 index 000000000..dd3288513 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/IntComparatorTest.java @@ -0,0 +1,266 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.rocksdb.*; + +import java.nio.ByteBuffer; +import java.nio.file.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Tests for IntComparator, but more generally + * also for rocksdb::ComparatorJniCallback implementation. + */ +@RunWith(Parameterized.class) +public class IntComparatorTest { + + // test with 500 random integer keys + private static final int TOTAL_KEYS = 500; + private static final byte[][] keys = new byte[TOTAL_KEYS][4]; + + @BeforeClass + public static void prepareKeys() { + final ByteBuffer buf = ByteBuffer.allocate(4); + final Random random = new Random(); + for (int i = 0; i < TOTAL_KEYS; i++) { + final int ri = random.nextInt(); + buf.putInt(ri); + buf.flip(); + final byte[] key = buf.array(); + + // does key already exist (avoid duplicates) + if (keyExists(key, i)) { + i--; // loop round and generate a different key + } else { + System.arraycopy(key, 0, keys[i], 0, 4); + } + } + } + + private static boolean keyExists(final byte[] key, final int limit) { + for (int j = 0; j < limit; j++) { + if (Arrays.equals(key, keys[j])) { + return true; + } + } + return false; + } + + @Parameters(name = "{0}") + public static Iterable<Object[]> parameters() { + return Arrays.asList(new Object[][] { + { "non-direct_reused64_mutex", false, 64, ReusedSynchronisationType.MUTEX }, + { "direct_reused64_mutex", true, 64, ReusedSynchronisationType.MUTEX }, + { "non-direct_reused64_adaptive-mutex", false, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX }, + { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX }, + { "non-direct_reused64_thread-local", false, 64, ReusedSynchronisationType.THREAD_LOCAL }, + { "direct_reused64_thread-local", true, 64, ReusedSynchronisationType.THREAD_LOCAL }, + { "non-direct_noreuse", false, -1, null }, + { "direct_noreuse", true, -1, null } + }); + } + + @Parameter(0) + public String name; + + @Parameter(1) + public boolean useDirectBuffer; + + @Parameter(2) + public int maxReusedBufferSize; + + @Parameter(3) + public ReusedSynchronisationType reusedSynchronisationType; + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + + @Test + public void javaComparatorDefaultCf() throws RocksDBException { + try (final ComparatorOptions options = new ComparatorOptions() + .setUseDirectBuffer(useDirectBuffer) + .setMaxReusedBufferSize(maxReusedBufferSize) + // if reusedSynchronisationType == null we assume that maxReusedBufferSize <= 0 and so we just set ADAPTIVE_MUTEX, even though it won't be used + .setReusedSynchronisationType(reusedSynchronisationType == null ?
ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType); + final IntComparator comparator = new IntComparator(options)) { + + // test the round-tripability of keys written and read with the Comparator + testRoundtrip(FileSystems.getDefault().getPath( + dbFolder.getRoot().getAbsolutePath()), comparator); + } + } + + @Test + public void javaComparatorNamedCf() throws RocksDBException { + try (final ComparatorOptions options = new ComparatorOptions() + .setUseDirectBuffer(useDirectBuffer) + .setMaxReusedBufferSize(maxReusedBufferSize) + // if reusedSynchronisationType == null we assume that maxReusedBufferSize <= 0 and so we just set ADAPTIVE_MUTEX, even though it won't be used + .setReusedSynchronisationType(reusedSynchronisationType == null ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType); + final IntComparator comparator = new IntComparator(options)) { + + // test the round-tripability of keys written and read with the Comparator + testRoundtripCf(FileSystems.getDefault().getPath( + dbFolder.getRoot().getAbsolutePath()), comparator); + } + } + + /** + * Test which stores random keys into the database + * using an {@link IntComparator}; it then checks + * that these keys are read back in + * ascending order + * + * @param db_path A path where we can store database + * files temporarily + * + * @param comparator the comparator + * + * @throws RocksDBException if a database error happens. + */ + private void testRoundtrip(final Path db_path, + final AbstractComparator comparator) throws RocksDBException { + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setComparator(comparator)) { + + // store TOTAL_KEYS into the db + try (final RocksDB db = RocksDB.open(opt, db_path.toString())) { + for (int i = 0; i < TOTAL_KEYS; i++) { + db.put(keys[i], "value".getBytes(UTF_8)); + } + } + + // re-open db and read from start to end + // integer keys should be in ascending + // order as defined by IntComparator + final ByteBuffer key = ByteBuffer.allocate(4); + try (final RocksDB db = RocksDB.open(opt, db_path.toString()); + final RocksIterator it = db.newIterator()) { + it.seekToFirst(); + int lastKey = Integer.MIN_VALUE; + int count = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + key.put(it.key()); + key.flip(); + final int thisKey = key.getInt(); + key.clear(); + assertThat(thisKey).isGreaterThan(lastKey); + lastKey = thisKey; + count++; + } + assertThat(count).isEqualTo(TOTAL_KEYS); + } + } + } + + /** + * Test which stores random keys into a column family + * in the database + * using an {@link IntComparator}; it then checks + * that these keys are read back in + * ascending order + * + * @param db_path A path where we can store database + * files temporarily + * + * @param comparator the comparator + * + * @throws RocksDBException if a database error happens.
+ */ + private void testRoundtripCf(final Path db_path, + final AbstractComparator comparator) throws RocksDBException { + + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), + new ColumnFamilyOptions() + .setComparator(comparator)) + ); + + final List<ColumnFamilyHandle> cfHandles = new ArrayList<>(); + + try (final DBOptions opt = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true)) { + + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles)) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); + + for (int i = 0; i < TOTAL_KEYS; i++) { + db.put(cfHandles.get(1), keys[i], "value".getBytes(UTF_8)); + } + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + cfHandles.clear(); + } + } + + // re-open db and read from start to end + // integer keys should be in ascending + // order as defined by IntComparator + final ByteBuffer key = ByteBuffer.allocate(4); + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles); + final RocksIterator it = db.newIterator(cfHandles.get(1))) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); + + it.seekToFirst(); + int lastKey = Integer.MIN_VALUE; + int count = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + key.put(it.key()); + key.flip(); + final int thisKey = key.getInt(); + key.clear(); + assertThat(thisKey).isGreaterThan(lastKey); + lastKey = thisKey; + count++; + } + + assertThat(count).isEqualTo(TOTAL_KEYS); + + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + cfHandles.clear(); + for (final ColumnFamilyDescriptor cfDescriptor : cfDescriptors) { + cfDescriptor.getOptions().close(); + } + } + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java new file mode 100644 index 000000000..a962b8d78 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/JNIComparatorTest.java @@ -0,0 +1,180 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. +// +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory).
+ +package org.rocksdb.util; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.rocksdb.*; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.*; +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + +@RunWith(Parameterized.class) +public class JNIComparatorTest { + + @Parameters(name = "{0}") + public static Iterable<Object[]> parameters() { + return Arrays.asList(new Object[][] { + { "bytewise_non-direct", BuiltinComparator.BYTEWISE_COMPARATOR, false }, + { "bytewise_direct", BuiltinComparator.BYTEWISE_COMPARATOR, true }, + { "reverse-bytewise_non-direct", BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR, false }, + { "reverse-bytewise_direct", BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR, true }, + }); + } + + @Parameter(0) + public String name; + + @Parameter(1) + public BuiltinComparator builtinComparator; + + @Parameter(2) + public boolean useDirectBuffer; + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + private static final int MIN = Short.MIN_VALUE - 1; + private static final int MAX = Short.MAX_VALUE + 1; + + @Test + public void java_comparator_equals_cpp_comparator() throws RocksDBException, IOException { + final int[] javaKeys; + try (final ComparatorOptions comparatorOptions = new ComparatorOptions(); + final AbstractComparator comparator = builtinComparator == BuiltinComparator.BYTEWISE_COMPARATOR + ?
new BytewiseComparator(comparatorOptions) + : new ReverseBytewiseComparator(comparatorOptions)) { + final Path javaDbDir = + FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath()); + storeWithJavaComparator(javaDbDir, comparator); + javaKeys = readAllWithJavaComparator(javaDbDir, comparator); + } + + final Path cppDbDir = + FileSystems.getDefault().getPath(dbFolder.newFolder().getAbsolutePath()); + storeWithCppComparator(cppDbDir, builtinComparator); + final int[] cppKeys = + readAllWithCppComparator(cppDbDir, builtinComparator); + + assertThat(javaKeys).isEqualTo(cppKeys); + } + + private void storeWithJavaComparator(final Path dir, + final AbstractComparator comparator) throws RocksDBException { + final ByteBuffer buf = ByteBuffer.allocate(4); + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(comparator); + final RocksDB db = + RocksDB.open(options, dir.toAbsolutePath().toString())) { + for (int i = MIN; i < MAX; i++) { + buf.putInt(i); + buf.flip(); + + db.put(buf.array(), buf.array()); + + buf.clear(); + } + } + } + + private void storeWithCppComparator(final Path dir, + final BuiltinComparator builtinComparator) throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(builtinComparator); + final RocksDB db = + RocksDB.open(options, dir.toAbsolutePath().toString())) { + + final ByteBuffer buf = ByteBuffer.allocate(4); + for (int i = MIN; i < MAX; i++) { + buf.putInt(i); + buf.flip(); + + db.put(buf.array(), buf.array()); + + buf.clear(); + } + } + } + + private int[] readAllWithJavaComparator(final Path dir, + final AbstractComparator comparator) throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(comparator); + final RocksDB db = + RocksDB.open(options, dir.toAbsolutePath().toString())) { + + try (final RocksIterator it = db.newIterator()) { + it.seekToFirst(); + + final ByteBuffer buf = ByteBuffer.allocate(4); + final int[] keys = new int[MAX - MIN]; + int idx = 0; + while (it.isValid()) { + buf.put(it.key()); + buf.flip(); + + final int thisKey = buf.getInt(); + keys[idx++] = thisKey; + + buf.clear(); + + it.next(); + } + + return keys; + } + } + } + + private int[] readAllWithCppComparator(final Path dir, + final BuiltinComparator comparator) throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(comparator); + final RocksDB db = + RocksDB.open(options, dir.toAbsolutePath().toString())) { + + try (final RocksIterator it = db.newIterator()) { + it.seekToFirst(); + + final ByteBuffer buf = ByteBuffer.allocate(4); + final int[] keys = new int[MAX - MIN]; + int idx = 0; + while (it.isValid()) { + buf.put(it.key()); + buf.flip(); + + final int thisKey = buf.getInt(); + keys[idx++] = thisKey; + + buf.clear(); + + it.next(); + } + + return keys; + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java new file mode 100644 index 000000000..ca08d9de1 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/ReverseBytewiseComparatorIntTest.java @@ -0,0 +1,270 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.rocksdb.*; + +import java.nio.ByteBuffer; +import java.nio.file.FileSystems; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; + +/** + * Similar to {@link IntComparatorTest}, but uses + * {@link ReverseBytewiseComparator} which ensures the correct reverse + * ordering of positive integers. + */ +@RunWith(Parameterized.class) +public class ReverseBytewiseComparatorIntTest { + + // test with 500 random positive integer keys + private static final int TOTAL_KEYS = 500; + private static final byte[][] keys = new byte[TOTAL_KEYS][4]; + + @BeforeClass + public static void prepareKeys() { + final ByteBuffer buf = ByteBuffer.allocate(4); + final Random random = new Random(); + for (int i = 0; i < TOTAL_KEYS; i++) { + final int ri = random.nextInt() & Integer.MAX_VALUE; // the & ensures a non-negative integer + buf.putInt(ri); + buf.flip(); + final byte[] key = buf.array(); + + // does key already exist (avoid duplicates) + if (keyExists(key, i)) { + i--; // loop round and generate a different key + } else { + System.arraycopy(key, 0, keys[i], 0, 4); + } + } + } + + private static boolean keyExists(final byte[] key, final int limit) { + for (int j = 0; j < limit; j++) { + if (Arrays.equals(key, keys[j])) { + return true; + } + } + return false; + } + + @Parameters(name = "{0}") + public static Iterable<Object[]> parameters() { + return Arrays.asList(new Object[][] { + { "non-direct_reused64_mutex", false, 64, ReusedSynchronisationType.MUTEX }, + { "direct_reused64_mutex", true, 64, ReusedSynchronisationType.MUTEX }, + { "non-direct_reused64_adaptive-mutex", false, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX }, + { "direct_reused64_adaptive-mutex", true, 64, ReusedSynchronisationType.ADAPTIVE_MUTEX }, + { "non-direct_reused64_thread-local", false, 64, ReusedSynchronisationType.THREAD_LOCAL }, + { "direct_reused64_thread-local", true, 64, ReusedSynchronisationType.THREAD_LOCAL }, + { "non-direct_noreuse", false, -1, null }, + { "direct_noreuse", true, -1, null } + }); + } + + @Parameter(0) + public String name; + + @Parameter(1) + public boolean useDirectBuffer; + + @Parameter(2) + public int maxReusedBufferSize; + + @Parameter(3) + public ReusedSynchronisationType reusedSynchronisationType; + + @ClassRule + public static final RocksNativeLibraryResource ROCKS_NATIVE_LIBRARY_RESOURCE = + new RocksNativeLibraryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + + @Test + public void javaComparatorDefaultCf() throws RocksDBException { + try (final ComparatorOptions options = new ComparatorOptions() + .setUseDirectBuffer(useDirectBuffer) + .setMaxReusedBufferSize(maxReusedBufferSize) + // if reusedSynchronisationType == null we assume that maxReusedBufferSize <= 0 and so we just set ADAPTIVE_MUTEX, even though it won't be
used + .setReusedSynchronisationType(reusedSynchronisationType == null ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType); + final ReverseBytewiseComparator comparator = + new ReverseBytewiseComparator(options)) { + + // test the round-tripability of keys written and read with the Comparator + testRoundtrip(FileSystems.getDefault().getPath( + dbFolder.getRoot().getAbsolutePath()), comparator); + } + } + + @Test + public void javaComparatorNamedCf() throws RocksDBException { + try (final ComparatorOptions options = new ComparatorOptions() + .setUseDirectBuffer(useDirectBuffer) + .setMaxReusedBufferSize(maxReusedBufferSize) + // if reusedSynchronisationType == null we assume that maxReusedBufferSize <= 0 and so we just set ADAPTIVE_MUTEX, even though it won't be used + .setReusedSynchronisationType(reusedSynchronisationType == null ? ReusedSynchronisationType.ADAPTIVE_MUTEX : reusedSynchronisationType); + final ReverseBytewiseComparator comparator + = new ReverseBytewiseComparator(options)) { + + // test the round-tripability of keys written and read with the Comparator + testRoundtripCf(FileSystems.getDefault().getPath( + dbFolder.getRoot().getAbsolutePath()), comparator); + } + } + + /** + * Test which stores random keys into the database + * using a {@link ReverseBytewiseComparator}; it then checks + * that these keys are read back in + * descending order + * + * @param db_path A path where we can store database + * files temporarily + * + * @param comparator the comparator + * + * @throws RocksDBException if a database error happens. + */ + private void testRoundtrip(final Path db_path, + final AbstractComparator comparator) throws RocksDBException { + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setComparator(comparator)) { + + // store TOTAL_KEYS into the db + try (final RocksDB db = RocksDB.open(opt, db_path.toString())) { + for (int i = 0; i < TOTAL_KEYS; i++) { + db.put(keys[i], "value".getBytes(UTF_8)); + } + } + + // re-open db and read from start to end + // integer keys should be in descending + // order + final ByteBuffer key = ByteBuffer.allocate(4); + try (final RocksDB db = RocksDB.open(opt, db_path.toString()); + final RocksIterator it = db.newIterator()) { + it.seekToFirst(); + int lastKey = Integer.MAX_VALUE; + int count = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + key.put(it.key()); + key.flip(); + final int thisKey = key.getInt(); + key.clear(); + assertThat(thisKey).isLessThan(lastKey); + lastKey = thisKey; + count++; + } + assertThat(count).isEqualTo(TOTAL_KEYS); + } + } + } + + /** + * Test which stores random keys into a column family + * in the database + * using a {@link ReverseBytewiseComparator}; it then checks + * that these keys are read back in + * descending order + * + * @param db_path A path where we can store database + * files temporarily + * + * @param comparator the comparator + * + * @throws RocksDBException if a database error happens.
+ */ + private void testRoundtripCf(final Path db_path, + final AbstractComparator comparator) throws RocksDBException { + + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), + new ColumnFamilyOptions() + .setComparator(comparator)) + ); + + final List<ColumnFamilyHandle> cfHandles = new ArrayList<>(); + + try (final DBOptions opt = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true)) { + + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles)) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); + + for (int i = 0; i < TOTAL_KEYS; i++) { + db.put(cfHandles.get(1), keys[i], "value".getBytes(UTF_8)); + } + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + cfHandles.clear(); + } + } + + // re-open db and read from start to end + // integer keys should be in descending + // order + final ByteBuffer key = ByteBuffer.allocate(4); + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles); + final RocksIterator it = db.newIterator(cfHandles.get(1))) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); + + it.seekToFirst(); + int lastKey = Integer.MAX_VALUE; + int count = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + key.put(it.key()); + key.flip(); + final int thisKey = key.getInt(); + key.clear(); + assertThat(thisKey).isLessThan(lastKey); + lastKey = thisKey; + count++; + } + + assertThat(count).isEqualTo(TOTAL_KEYS); + + } finally { + for (final ColumnFamilyHandle cfHandle : cfHandles) { + cfHandle.close(); + } + cfHandles.clear(); + for (final ColumnFamilyDescriptor cfDescriptor : cfDescriptors) { + cfDescriptor.getOptions().close(); + } + } + } + } + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java new file mode 100644 index 000000000..990aa5f47 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java @@ -0,0 +1,27 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +package org.rocksdb.util; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SizeUnitTest { + + public static final long COMPUTATION_UNIT = 1024L; + + @Test + public void sizeUnit() { + assertThat(SizeUnit.KB).isEqualTo(COMPUTATION_UNIT); + assertThat(SizeUnit.MB).isEqualTo( + SizeUnit.KB * COMPUTATION_UNIT); + assertThat(SizeUnit.GB).isEqualTo( + SizeUnit.MB * COMPUTATION_UNIT); + assertThat(SizeUnit.TB).isEqualTo( + SizeUnit.GB * COMPUTATION_UNIT); + assertThat(SizeUnit.PB).isEqualTo( + SizeUnit.TB * COMPUTATION_UNIT); + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java new file mode 100644 index 000000000..e4f490c8e --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/TestUtil.java @@ -0,0 +1,72 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). + +package org.rocksdb.util; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import java.nio.ByteBuffer; +import java.util.Random; +import org.rocksdb.CompactionPriority; +import org.rocksdb.Options; +import org.rocksdb.WALRecoveryMode; + +/** + * General test utilities. + */ +public class TestUtil { + + /** + * Get the options for log iteration tests. + * + * @return the options + */ + public static Options optionsForLogIterTest() { + return defaultOptions() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000); + } + + /** + * Get the default options. + * + * @return the options + */ + public static Options defaultOptions() { + return new Options() + .setWriteBufferSize(4090 * 4096) + .setTargetFileSizeBase(2 * 1024 * 1024) + .setMaxBytesForLevelBase(10 * 1024 * 1024) + .setMaxOpenFiles(5000) + .setWalRecoveryMode(WALRecoveryMode.TolerateCorruptedTailRecords) + .setCompactionPriority(CompactionPriority.ByCompensatedSize); + } + + private static final Random random = new Random(); + + /** + * Generate a random string of bytes. + * + * @param len the length of the string to generate. + * + * @return the random string of bytes + */ + public static byte[] dummyString(final int len) { + final byte[] str = new byte[len]; + random.nextBytes(str); + return str; + } + + /** + * Copy a {@link ByteBuffer} into an array for shorthand ease of test coding + * @param byteBuffer the buffer to copy + * @return a {@code byte[]} containing the same bytes as the input + */ + public static byte[] bufferBytes(final ByteBuffer byteBuffer) { + final byte[] result = new byte[byteBuffer.limit()]; + byteBuffer.get(result); + return result; + } +} diff --git a/src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java b/src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java new file mode 100644 index 000000000..2efa16473 --- /dev/null +++ b/src/rocksdb/java/src/test/java/org/rocksdb/util/WriteBatchGetter.java @@ -0,0 +1,139 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+package org.rocksdb.util; + +import org.rocksdb.RocksDBException; +import org.rocksdb.WriteBatch; + +import java.util.Arrays; + +public class WriteBatchGetter extends WriteBatch.Handler { + + private int columnFamilyId = -1; + private final byte[] key; + private byte[] value; + + public WriteBatchGetter(final byte[] key) { + this.key = key; + } + + public byte[] getValue() { + return value; + } + + @Override + public void put(final int columnFamilyId, final byte[] key, + final byte[] value) { + if(Arrays.equals(this.key, key)) { + this.columnFamilyId = columnFamilyId; + this.value = value; + } + } + + @Override + public void put(final byte[] key, final byte[] value) { + if(Arrays.equals(this.key, key)) { + this.value = value; + } + } + + @Override + public void merge(final int columnFamilyId, final byte[] key, + final byte[] value) { + if(Arrays.equals(this.key, key)) { + this.columnFamilyId = columnFamilyId; + this.value = value; + } + } + + @Override + public void merge(final byte[] key, final byte[] value) { + if(Arrays.equals(this.key, key)) { + this.value = value; + } + } + + @Override + public void delete(final int columnFamilyId, final byte[] key) { + if(Arrays.equals(this.key, key)) { + this.columnFamilyId = columnFamilyId; + this.value = null; + } + } + + @Override + public void delete(final byte[] key) { + if(Arrays.equals(this.key, key)) { + this.value = null; + } + } + + @Override + public void singleDelete(final int columnFamilyId, final byte[] key) { + if(Arrays.equals(this.key, key)) { + this.columnFamilyId = columnFamilyId; + this.value = null; + } + } + + @Override + public void singleDelete(final byte[] key) { + if(Arrays.equals(this.key, key)) { + this.value = null; + } + } + + @Override + public void deleteRange(final int columnFamilyId, final byte[] beginKey, + final byte[] endKey) { + throw new UnsupportedOperationException(); + } + + @Override + public void deleteRange(final byte[] beginKey, final byte[] endKey) { + throw new UnsupportedOperationException(); + } + + @Override + public void logData(final byte[] blob) { + throw new UnsupportedOperationException(); + } + + @Override + public void putBlobIndex(final int columnFamilyId, final byte[] key, + final byte[] value) { + if(Arrays.equals(this.key, key)) { + this.columnFamilyId = columnFamilyId; + this.value = value; + } + } + + @Override + public void markBeginPrepare() throws RocksDBException { + throw new UnsupportedOperationException(); + } + + @Override + public void markEndPrepare(final byte[] xid) throws RocksDBException { + throw new UnsupportedOperationException(); + } + + @Override + public void markNoop(final boolean emptyBatch) throws RocksDBException { + throw new UnsupportedOperationException(); + } + + @Override + public void markRollback(final byte[] xid) throws RocksDBException { + throw new UnsupportedOperationException(); + } + + @Override + public void markCommit(final byte[] xid) throws RocksDBException { + throw new UnsupportedOperationException(); + } + + @Override + public void markCommitWithTimestamp(final byte[] xid, final byte[] ts) throws RocksDBException { + throw new UnsupportedOperationException(); + } +} diff --git a/src/rocksdb/java/understanding_options.md b/src/rocksdb/java/understanding_options.md new file mode 100644 index 000000000..0393aff4d --- /dev/null +++ b/src/rocksdb/java/understanding_options.md @@ -0,0 +1,79 @@ +# How RocksDB Options and their Java Wrappers Work + +Options in RocksDB come in many different flavours. This is an attempt at a taxonomy and explanation. 
+ +## RocksDB Options + +Initially, I believe, RocksDB had only database options. I don't know if any of these were mutable. Column families came later. Read on to understand the terminology. + +So to begin, one sets up a collection of options and starts/creates a database with these options. That's a useful way to think about it, because from a Java point-of-view (and I didn't realise this initially and got very confused), despite making native calls to C++, the `API`s are just manipulating a native C++ configuration object. This object is just a record of configuration; it must later be passed to the database (at create or open time) in order to apply the options. + +### Database versus Column Family + +The concept of the *column family* or `CF` is widespread within RocksDB. I think of it as a data namespace, but conveniently transactions can operate across these namespaces. The concept of a default column family exists, and when operations do not refer to a particular `CF`, they refer to the default. + +We raise this w.r.t. options because many options, perhaps most that users encounter, are *column family options*. That is to say they apply individually to a particular column family, or to the default column family. Crucially also, many/most/all of these same options are exposed as *database options* and then apply as the default for column families which do not have the option set explicitly. Obviously some database options are naturally database-wide; they apply to the operation of the database and don't make any sense applied to a column family. + +### Mutability + +There are 2 kinds of options: + +- Mutable options +- Immutable options. We name these in contrast to the mutable ones, but they are usually referred to unqualified. + +Mutable options are those which can be changed on a running `RocksDB` instance. Immutable options can only be configured prior to the start of a database. Of course, we can configure the immutable options at this time too; the entirety of options is a strict superset of the mutable options. + +Mutable options (whether column-family specific or database-wide) are manipulated at runtime with builders, so we have `MutableDBOptions.MutableDBOptionsBuilder` and `MutableColumnFamilyOptions.MutableColumnFamilyOptionsBuilder`, which share tooling classes/hierarchy and maintain and manipulate the relevant options as a `(key,value)` map. + +Mutable options are then passed using the `setOptions()` and `setDBOptions()` methods on the live RocksDB, and take effect immediately (depending on the semantics of the option) on the database; sketches of this appear at the end of this document. + +### Advanced + +There are 2 classes of options: + +- Advanced options +- Non-advanced options + +It's not clear to me what the conceptual distinction is between advanced and not. However, the Java code takes care to reflect it from the underlying C++. + +This leads to 2 separate type hierarchies within column family options, one for each `class` of options. The `kind`s are represented by where the options appear in their hierarchy.
+ +```java +interface ColumnFamilyOptionsInterface<T extends ColumnFamilyOptionsInterface<T>> + extends AdvancedColumnFamilyOptionsInterface<T> +interface MutableColumnFamilyOptionsInterface<T extends MutableColumnFamilyOptionsInterface<T>> + extends AdvancedMutableColumnFamilyOptionsInterface<T> +``` + +And then there is ultimately a single concrete implementation class for CF options: + +```java +class ColumnFamilyOptions extends RocksObject + implements ColumnFamilyOptionsInterface<ColumnFamilyOptions>, + MutableColumnFamilyOptionsInterface<ColumnFamilyOptions> +``` + +as there is a single concrete implementation class for DB options: + +```java +class DBOptions extends RocksObject + implements DBOptionsInterface<DBOptions>, + MutableDBOptionsInterface<DBOptions> +``` + +Interestingly `DBOptionsInterface` doesn't extend `MutableDBOptionsInterface`, if only in order to disrupt our belief in consistent basic laws of the Universe. + +## Startup/Creation Options + +```java +class Options extends RocksObject + implements DBOptionsInterface<Options>, + MutableDBOptionsInterface<Options>, + ColumnFamilyOptionsInterface<Options>, + MutableColumnFamilyOptionsInterface<Options> +``` + +### Example - Blob Options + +The `enable_blob_files` and `min_blob_size` options are per-column-family, and are mutable. The options also appear in the unqualified database options. So by initial configuration, we can set up a RocksDB database where for every `(key,value)` with a value of size at least `min_blob_size`, the value is written (indirected) to a blob file. Blobs may share a blob file, subject to the configuration values set. Later, using the `MutableColumnFamilyOptionsInterface` of the `ColumnFamilyOptions`, we can choose to turn this off (`enable_blob_files=false`), or alter the `min_blob_size` for the default column family, or any other column family. It seems to me that we cannot, though, mutate the column family options for all column families using the +`setOptions()` mechanism, either for all existing column families or for all future column families; but maybe we can do the latter on a re-`open()`/`create()`.
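+ +To make the open-time side of this concrete, here is a minimal sketch (not part of the sources above) of configuring blob options through the all-in-one `Options` class before opening a database. It assumes the RocksJava blob setters `setEnableBlobFiles` and `setMinBlobSize` as exposed via `MutableColumnFamilyOptionsInterface`; the path and size thresholds are purely illustrative. + +```java +import org.rocksdb.Options; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; + +public class BlobOpenSketch { + public static void main(final String[] args) throws RocksDBException { + RocksDB.loadLibrary(); + // `Options` carries both database-wide and (default) column family options, + // so the blob settings are simply recorded here and applied at open time. + try (final Options options = new Options() + .setCreateIfMissing(true) + .setEnableBlobFiles(true) // per-CF, mutable option + .setMinBlobSize(64 * 1024); // values >= 64 KiB are indirected to blob files + final RocksDB db = RocksDB.open(options, "/tmp/blob-example")) { + db.put("key".getBytes(), new byte[128 * 1024]); // large value, goes to a blob file + } + } +} +```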
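+ +And a matching sketch of the runtime path: mutating the same options on a live database with the `MutableColumnFamilyOptions` builder and `setOptions()`. Again this is an assumption-laden illustration rather than part of the sources above; it relies on `MutableColumnFamilyOptions.builder()` and the `RocksDB.setOptions()` overloads described earlier. + +```java +import org.rocksdb.MutableColumnFamilyOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; + +public class BlobMutateSketch { + // Turn blob files off again for one column family on a running database. + static void disableBlobs(final RocksDB db) throws RocksDBException { + // The builder keeps the mutable options as a (key,value) map, which is + // handed over JNI and takes effect immediately on the live instance. + final MutableColumnFamilyOptions mutableOptions = + MutableColumnFamilyOptions.builder() + .setEnableBlobFiles(false) + .setMinBlobSize(1024 * 1024) + .build(); + // This overload targets the default column family; the overload taking a + // ColumnFamilyHandle targets one specific column family at a time. + db.setOptions(mutableOptions); + } +} +```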