Update to RocksDB 9.0.0
zaidoon1 committed Mar 19, 2024
1 parent 60f783b commit 7f6c160
Showing 6 changed files with 101 additions and 139 deletions.
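In substance: both crates bump their versions and the vendored RocksDB submodule moves from v8.11.3 to v9.0.0; `set_access_hint_on_compaction_start` and the `AccessHint` enum are removed from `src/db_options.rs` (the underlying option was dropped upstream in RocksDB 9.0.0); and the ticker list in `src/statistics.rs` is reordered and extended to match the 9.0.0 statistics definitions.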
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -37,7 +37,7 @@ portable = ["rust-librocksdb-sys/portable"]

[dependencies]
libc = "0.2"
-rust-librocksdb-sys = { path = "librocksdb-sys", version = "0.18.2" }
+rust-librocksdb-sys = { path = "librocksdb-sys", version = "0.19.0" }
serde = { version = "1", features = ["derive"], optional = true }

[dev-dependencies]
2 changes: 1 addition & 1 deletion librocksdb-sys/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "rust-librocksdb-sys"
version = "0.18.2+8.11.3"
version = "0.19.0+9.0.0"
edition = "2021"
rust-version = "1.75.0"
authors = [
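Note: the suffix after the `+` is semver build metadata, which this crate uses to record the bundled RocksDB release; the bump from `0.18.2+8.11.3` to `0.19.0+9.0.0` therefore advertises the vendored RocksDB 9.0.0.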
8 changes: 4 additions & 4 deletions librocksdb-sys/build_version.cc
@@ -8,17 +8,17 @@

// The build script may replace these values with real values based
// on whether or not GIT is available and the platform settings
static const std::string rocksdb_build_git_sha = "c2467b141e840fdba5b3a1810763043e56449fb9";
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v8.11.3";
static const std::string rocksdb_build_git_sha = "f4441966592636253fd5ab0bb9ed44fc2697fc53";
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:v9.0.0";
#define HAS_GIT_CHANGES 0
#if HAS_GIT_CHANGES == 0
// If HAS_GIT_CHANGES is 0, the GIT date is used.
// Use the time the branch/tag was last modified
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-02-27 16:24:11";
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-03-18 15:15:28";
#else
// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
// Use the time the build was created.
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-02-27 16:24:11";
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-03-18 15:15:28";
#endif

std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {};
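As the comment in this file notes, the SHA/tag/date strings are placeholders that a build script may overwrite with live git metadata; this crate ships them pre-generated, as the diff shows. For illustration only, a hedged sketch of how such substitution could look in a hypothetical `build.rs` (the `build_version.cc.in` template name and `@…@` markers are assumptions borrowed from upstream RocksDB's approach, not part of this crate):

```rust
// Hypothetical build.rs sketch: stamp git metadata into build_version.cc.
// Assumes a checked-in template `build_version.cc.in` containing
// @GIT_SHA@ / @GIT_TAG@ / @BUILD_DATE@ markers; rust-librocksdb-sys
// actually checks in the pre-generated file instead.
use std::{env, fs, process::Command};

fn git(args: &[&str]) -> Option<String> {
    let out = Command::new("git").args(args).output().ok()?;
    out.status
        .success()
        .then(|| String::from_utf8_lossy(&out.stdout).trim().to_string())
}

fn main() {
    let sha = git(&["rev-parse", "HEAD"]).unwrap_or_else(|| "unknown".into());
    let tag = git(&["describe", "--tags", "--abbrev=0"]).unwrap_or_else(|| "unknown".into());
    let date = git(&["log", "-1", "--format=%cd", "--date=format:%Y-%m-%d %H:%M:%S"])
        .unwrap_or_else(|| "unknown".into());

    let template = fs::read_to_string("build_version.cc.in").expect("missing template");
    let filled = template
        .replace("@GIT_SHA@", &sha)
        .replace("@GIT_TAG@", &tag)
        .replace("@BUILD_DATE@", &date);

    let out_dir = env::var("OUT_DIR").unwrap();
    fs::write(format!("{out_dir}/build_version.cc"), filled).expect("write failed");
    println!("cargo:rerun-if-changed=build_version.cc.in");
}
```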
2 changes: 1 addition & 1 deletion librocksdb-sys/rocksdb
Submodule rocksdb updated 511 files
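(The updated submodule pointer corresponds to the RocksDB commit `f4441966…` tagged v9.0.0, matching the values stamped into `build_version.cc` above.)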
22 changes: 0 additions & 22 deletions src/db_options.rs
@@ -2886,17 +2886,6 @@ impl Options {
}
}

-    /// Specifies the file access pattern once a compaction is started.
-    ///
-    /// It will be applied to all input files of a compaction.
-    ///
-    /// Default: Normal
-    pub fn set_access_hint_on_compaction_start(&mut self, pattern: AccessHint) {
-        unsafe {
-            ffi::rocksdb_options_set_access_hint_on_compaction_start(self.inner, pattern as c_int);
-        }
-    }

/// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
///
/// This could reduce context switch when the mutex is not
@@ -4047,17 +4036,6 @@ pub enum DBRecoveryMode {
SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}

-/// File access pattern once a compaction has started
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
-#[repr(i32)]
-pub enum AccessHint {
-    None = 0,
-    Normal,
-    Sequential,
-    WillNeed,
-}

pub struct FifoCompactOptions {
pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}
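RocksDB 9.0.0 removed the long-deprecated `access_hint_on_compaction_start` option, and with it the `rocksdb_options_set_access_hint_on_compaction_start` C API, so the wrapper and the `AccessHint` enum are deleted outright rather than deprecated. For downstream callers the migration is pure deletion; a hedged sketch (the options setup around it is illustrative only):

```rust
use rust_rocksdb::Options;

fn make_options() -> Options {
    let mut opts = Options::default();
    opts.create_if_missing(true);

    // Before this change (RocksDB 8.x bindings):
    // opts.set_access_hint_on_compaction_start(AccessHint::Sequential);
    //
    // After: the option no longer exists upstream and has no replacement;
    // simply delete the call when upgrading.

    opts
}
```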
204 changes: 94 additions & 110 deletions src/statistics.rs
@@ -146,13 +146,55 @@ iterable_named_enum! {
/// # of bytes written into cache.
BlockCacheBytesWrite("rocksdb.block.cache.bytes.write"),

BlockCacheCompressionDictMiss("rocksdb.block.cache.compression.dict.miss"),
BlockCacheCompressionDictHit("rocksdb.block.cache.compression.dict.hit"),
BlockCacheCompressionDictAdd("rocksdb.block.cache.compression.dict.add"),
BlockCacheCompressionDictBytesInsert("rocksdb.block.cache.compression.dict.bytes.insert"),

/// # of blocks redundantly inserted into block cache.
/// REQUIRES: BLOCK_CACHE_ADD_REDUNDANT <= BLOCK_CACHE_ADD
BlockCacheAddRedundant("rocksdb.block.cache.add.redundant"),
/// # of index blocks redundantly inserted into block cache.
/// REQUIRES: BLOCK_CACHE_INDEX_ADD_REDUNDANT <= BLOCK_CACHE_INDEX_ADD
BlockCacheIndexAddRedundant("rocksdb.block.cache.index.add.redundant"),
/// # of filter blocks redundantly inserted into block cache.
/// REQUIRES: BLOCK_CACHE_FILTER_ADD_REDUNDANT <= BLOCK_CACHE_FILTER_ADD
BlockCacheFilterAddRedundant("rocksdb.block.cache.filter.add.redundant"),
/// # of data blocks redundantly inserted into block cache.
/// REQUIRES: BLOCK_CACHE_DATA_ADD_REDUNDANT <= BLOCK_CACHE_DATA_ADD
BlockCacheDataAddRedundant("rocksdb.block.cache.data.add.redundant"),
/// # of dict blocks redundantly inserted into block cache.
/// REQUIRES: BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT
/// <= BLOCK_CACHE_COMPRESSION_DICT_ADD
BlockCacheCompressionDictAddRedundant("rocksdb.block.cache.compression.dict.add.redundant"),

SecondaryCacheHits("rocksdb.secondary.cache.hits"),
SecondaryCacheFilterHits("rocksdb.secondary.cache.filter.hits"),
SecondaryCacheIndexHits("rocksdb.secondary.cache.index.hits"),
SecondaryCacheDataHits("rocksdb.secondary.cache.data.hits"),

CompressedSecondaryCacheDummyHits("rocksdb.compressed.secondary.cache.dummy.hits"),
CompressedSecondaryCacheHits("rocksdb.compressed.secondary.cache.hits"),
CompressedSecondaryCachePromotions("rocksdb.compressed.secondary.cache.promotions"),
CompressedSecondaryCachePromotionSkips("rocksdb.compressed.secondary.cache.promotion.skips"),

/// # of times bloom filter has avoided file reads, i.e., negatives.
BloomFilterUseful("rocksdb.bloom.filter.useful"),
/// # of times bloom FullFilter has not avoided the reads.
BloomFilterFullPositive("rocksdb.bloom.filter.full.positive"),
/// # of times bloom FullFilter has not avoided the reads and data actually
/// exist.
BloomFilterFullTruePositive("rocksdb.bloom.filter.full.true.positive"),
/// Prefix filter stats when used for point lookups (Get / MultiGet).
/// (For prefix filter stats on iterators, see *_LEVEL_Seek_*.)
/// Checked: filter was queried
BloomFilterPrefixChecked("rocksdb.bloom.filter.prefix.checked"),
/// Useful: filter returned false so prevented accessing data+index blocks
BloomFilterPrefixUseful("rocksdb.bloom.filter.prefix.useful"),
/// True positive: found a key matching the point query. When another key
/// with the same prefix matches, it is considered a false positive by
/// these statistics even though the filter returned a true positive.
BloomFilterPrefixTruePositive("rocksdb.bloom.filter.prefix.true.positive"),

/// # persistent cache hit
PersistentCacheHit("rocksdb.persistent.cache.hit"),
@@ -221,6 +263,16 @@ iterable_named_enum! {
/// The number of uncompressed bytes read from an iterator.
/// Includes size of key and value.
IterBytesRead("rocksdb.db.iter.bytes.read"),
/// Number of internal keys skipped by Iterator
NumberIterSkip("rocksdb.number.iter.skip"),
/// Number of times we had to reseek inside an iteration to skip
/// over large number of keys with same userkey.
NumberOfReseeksInIteration("rocksdb.number.reseeks.iteration"),

/// number of iterators created
NoIteratorCreated("rocksdb.num.iterator.created"),
/// number of iterators deleted
NoIteratorDeleted("rocksdb.num.iterator.deleted"),

NoFileOpens("rocksdb.no.file.opens"),
NoFileErrors("rocksdb.no.file.errors"),
/// Writer has to wait for compaction or flush to finish.
@@ -233,24 +285,10 @@
NumberMultigetCalls("rocksdb.number.multiget.get"),
NumberMultigetKeysRead("rocksdb.number.multiget.keys.read"),
NumberMultigetBytesRead("rocksdb.number.multiget.bytes.read"),
NumberMultigetKeysFound("rocksdb.number.multiget.keys.found"),

NumberMergeFailures("rocksdb.number.merge.failures"),

/// Prefix filter stats when used for point lookups (Get / MultiGet).
/// (For prefix filter stats on iterators, see *_LEVEL_Seek_*.)
/// Checked: filter was queried
BloomFilterPrefixChecked("rocksdb.bloom.filter.prefix.checked"),
/// Useful: filter returned false so prevented accessing data+index blocks
BloomFilterPrefixUseful("rocksdb.bloom.filter.prefix.useful"),
/// True positive: found a key matching the point query. When another key
/// with the same prefix matches, it is considered a false positive by
/// these statistics even though the filter returned a true positive.
BloomFilterPrefixTruePositive("rocksdb.bloom.filter.prefix.true.positive"),

/// Number of times we had to reseek inside an iteration to skip
/// over large number of keys with same userkey.
NumberOfReseeksInIteration("rocksdb.number.reseeks.iteration"),

/// Record the number of calls to GetUpdatesSince. Useful to keep track of
/// transaction log iterator refreshes
GetUpdatesSinceCalls("rocksdb.getupdatessince.calls"),
@@ -291,9 +329,33 @@ iterable_named_enum! {
/// # of compressions/decompressions executed
NumberBlockCompressed("rocksdb.number.block.compressed"),
NumberBlockDecompressed("rocksdb.number.block.decompressed"),

/// DEPRECATED / unused (see NumberBlockCompression_*)
NumberBlockNotCompressed("rocksdb.number.block.not_compressed"),
/// Number of input bytes (uncompressed) to compression for SST blocks that
/// are stored compressed.
BytesCompressedFrom("rocksdb.bytes.compressed.from"),
/// Number of output bytes (compressed) from compression for SST blocks that
/// are stored compressed.
BytesCompressedTo("rocksdb.bytes.compressed.to"),
/// Number of uncompressed bytes for SST blocks that are stored uncompressed
/// because compression type is kNoCompression, or some error case caused
/// compression not to run or produce an output. Index blocks are only counted
/// if enable_index_compression is true.
BytesCompressionBypassed("rocksdb.bytes.compression_bypassed"),
/// Number of input bytes (uncompressed) to compression for SST blocks that
/// are stored uncompressed because the compression result was rejected,
/// either because the ratio was not acceptable (see
/// CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the
/// `verify_compression` option.
BytesCompressionRejected("rocksdb.bytes.compression.rejected"),
/// Like BytesCompressionBypassed but counting number of blocks
NumberBlockCompressionBypassed("rocksdb.number.block_compression_bypassed"),
/// Like BytesCompressionRejected but counting number of blocks
NumberBlockCompressionRejected("rocksdb.number.block_compression_rejected"),
/// Number of input bytes (compressed) to decompression in reading compressed
/// SST blocks from storage.
BytesDecompressedFrom("rocksdb.bytes.decompressed.from"),
/// Number of output bytes (uncompressed) from decompression in reading
/// compressed SST blocks from storage.
BytesDecompressedTo("rocksdb.bytes.decompressed.to"),

/// Tickers that record cumulative time.
MergeOperationTotalTime("rocksdb.merge.operation.time.nanos"),
@@ -317,9 +379,6 @@ iterable_named_enum! {
/// Number of refill intervals where rate limiter's bytes are fully consumed.
NumberRateLimiterDrains("rocksdb.number.rate_limiter.drains"),

/// Number of internal keys skipped by Iterator
NumberIterSkip("rocksdb.number.iter.skip"),

/// BlobDB specific stats
/// # of Put/PutTtl/PutUntil to BlobDB. Only applicable to legacy BlobDB.
BlobDbNumPut("rocksdb.blobdb.num.put"),
@@ -398,6 +457,20 @@ iterable_named_enum! {
/// applicable to legacy BlobDB.
BlobDbFifoBytesEvicted("rocksdb.blobdb.fifo.bytes.evicted"),

/// Integrated BlobDB specific stats
/// # of times cache miss when accessing blob from blob cache.
BlobDbCacheMiss("rocksdb.blobdb.cache.miss"),
/// # of times cache hit when accessing blob from blob cache.
BlobDbCacheHit("rocksdb.blobdb.cache.hit"),
/// # of data blocks added to blob cache.
BlobDbCacheAdd("rocksdb.blobdb.cache.add"),
/// # of failures when adding blobs to blob cache.
BlobDbCacheAddFailures("rocksdb.blobdb.cache.add.failures"),
/// # of bytes read from blob cache.
BlobDbCacheBytesRead("rocksdb.blobdb.cache.bytes.read"),
/// # of bytes written into blob cache.
BlobDbCacheBytesWrite("rocksdb.blobdb.cache.bytes.write"),

/// These counters indicate a performance issue in WritePrepared transactions.
/// We should not see them ticking much.
/// # of times prepare_mutex_ is acquired in the fast path.
@@ -411,37 +484,6 @@
/// # of times ::Get returned TryAgain due to expired snapshot seq
TxnGetTryAgain("rocksdb.txn.get.tryagain"),

/// Number of keys actually found in MultiGet calls (vs number requested by
/// caller)
/// NumberMultigetKeys_Read gives the number requested by caller
NumberMultigetKeysFound("rocksdb.number.multiget.keys.found"),

NoIteratorCreated("rocksdb.num.iterator.created"),
/// number of iterators created
NoIteratorDeleted("rocksdb.num.iterator.deleted"),
/// number of iterators deleted
BlockCacheCompressionDictMiss("rocksdb.block.cache.compression.dict.miss"),
BlockCacheCompressionDictHit("rocksdb.block.cache.compression.dict.hit"),
BlockCacheCompressionDictAdd("rocksdb.block.cache.compression.dict.add"),
BlockCacheCompressionDictBytesInsert("rocksdb.block.cache.compression.dict.bytes.insert"),

/// # of blocks redundantly inserted into block cache.
/// REQUIRES: BlockCacheAddRedundant <= BlockCacheAdd
BlockCacheAddRedundant("rocksdb.block.cache.add.redundant"),
/// # of index blocks redundantly inserted into block cache.
/// REQUIRES: BlockCacheIndexAddRedundant <= BlockCacheIndexAdd
BlockCacheIndexAddRedundant("rocksdb.block.cache.index.add.redundant"),
/// # of filter blocks redundantly inserted into block cache.
/// REQUIRES: BlockCacheFilterAddRedundant <= BlockCacheFilterAdd
BlockCacheFilterAddRedundant("rocksdb.block.cache.filter.add.redundant"),
/// # of data blocks redundantly inserted into block cache.
/// REQUIRES: BlockCacheDataAddRedundant <= BlockCacheDataAdd
BlockCacheDataAddRedundant("rocksdb.block.cache.data.add.redundant"),
/// # of dict blocks redundantly inserted into block cache.
/// REQUIRES: BlockCacheCompressionDictAddRedundant
/// <= BlockCacheCompressionDictAdd
BlockCacheCompressionDictAddRedundant("rocksdb.block.cache.compression.dict.add.redundant"),

/// # of files marked as trash by sst file manager and will be deleted
/// later by background thread.
FilesMarkedTrash("rocksdb.files.marked.trash"),
@@ -471,9 +513,6 @@ iterable_named_enum! {
/// Outdated bytes of data present on memtable at flush time.
MemtableGarbageBytesAtFlush("rocksdb.memtable.garbage.bytes.at.flush"),

/// Secondary cache statistics
SecondaryCacheHits("rocksdb.secondary.cache.hits"),

/// Bytes read by `VerifyChecksum()` and `VerifyFileChecksums()` APIs.
VerifyChecksumReadBytes("rocksdb.verify_checksum.read.bytes"),

@@ -534,30 +573,11 @@ iterable_named_enum! {

MultigetCoroutineCount("rocksdb.multiget.coroutine.count"),

/// Integrated BlobDB specific stats
/// # of times cache miss when accessing blob from blob cache.
BlobDbCacheMiss("rocksdb.blobdb.cache.miss"),
/// # of times cache hit when accessing blob from blob cache.
BlobDbCacheHit("rocksdb.blobdb.cache.hit"),
/// # of data blocks added to blob cache.
BlobDbCacheAdd("rocksdb.blobdb.cache.add"),
/// # of failures when adding blobs to blob cache.
BlobDbCacheAddFailures("rocksdb.blobdb.cache.add.failures"),
/// # of bytes read from blob cache.
BlobDbCacheBytesRead("rocksdb.blobdb.cache.bytes.read"),
/// # of bytes written into blob cache.
BlobDbCacheBytesWrite("rocksdb.blobdb.cache.bytes.write"),

/// Time spent in the ReadAsync file system call
ReadAsyncMicros("rocksdb.read.async.micros"),
/// Number of errors returned to the async read callback
AsyncReadErrorCount("rocksdb.async.read.error.count"),

/// Fine grained secondary cache stats
SecondaryCacheFilterHits("rocksdb.secondary.cache.filter.hits"),
SecondaryCacheIndexHits("rocksdb.secondary.cache.index.hits"),
SecondaryCacheDataHits("rocksdb.secondary.cache.data.hits"),

/// Number of lookup into the prefetched tail (see
/// `TableOpenPrefetchTailReadBytes`)
/// that can't find its data for table open
@@ -573,36 +593,6 @@
/// # of times timestamps can successfully help skip the table access
TimestampFilterTableFiltered("rocksdb.timestamp.filter.table.filtered"),

/// Number of input bytes (uncompressed) to compression for SST blocks that
/// are stored compressed.
BytesCompressedFrom("rocksdb.bytes.compressed.from"),
/// Number of output bytes (compressed) from compression for SST blocks that
/// are stored compressed.
BytesCompressedTo("rocksdb.bytes.compressed.to"),
/// Number of uncompressed bytes for SST blocks that are stored uncompressed
/// because compression type is kNoCompression, or some error case caused
/// compression not to run or produce an output. Index blocks are only counted
/// if enable_index_compression is true.
BytesCompressionBypassed("rocksdb.bytes.compression_bypassed"),
/// Number of input bytes (uncompressed) to compression for SST blocks that
/// are stored uncompressed because the compression result was rejected,
/// either because the ratio was not acceptable (see
/// CompressionOptions::max_compressed_bytes_per_kb) or found invalid by the
/// `verify_compression` option.
BytesCompressionRejected("rocksdb.bytes.compression.rejected"),

/// Like BytesCompressionBypassed but counting number of blocks
NumberBlockCompressionBypassed("rocksdb.number.block_compression_bypassed"),
/// Like BytesCompressionRejected but counting number of blocks
NumberBlockCompressionRejected("rocksdb.number.block_compression_rejected"),

/// Number of input bytes (compressed) to decompression in reading compressed
/// SST blocks from storage.
BytesDecompressedFrom("rocksdb.bytes.decompressed.from"),
/// Number of output bytes (uncompressed) from decompression in reading
/// compressed SST blocks from storage.
BytesDecompressedTo("rocksdb.bytes.decompressed.to"),

/// Number of times readahead is trimmed during scans when
/// ReadOptions.auto_readahead_size is set.
ReadAheadTrimmed("rocksdb.readahead.trimmed"),
Expand All @@ -619,12 +609,6 @@ iterable_named_enum! {

/// Number of FS reads avoided due to scan prefetching
PrefetchHits("rocksdb.prefetch.hits"),

/// Compressed secondary cache related stats
CompressedSecondaryCacheDummyHits("rocksdb.compressed.secondary.cache.dummy.hits"),
CompressedSecondaryCacheHits("rocksdb.compressed.secondary.cache.hits"),
CompressedSecondaryCachePromotions("rocksdb.compressed.secondary.cache.promotions"),
CompressedSecondaryCachePromotionSkips("rocksdb.compressed.secondary.cache.promotion.skips"),
}
}
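For context on how these tickers are consumed: statistics collection is opt-in on `Options`, and the accumulated counters can then be read back. A minimal sketch, assuming the `enable_statistics`/`get_statistics` methods this crate exposes (a per-ticker accessor, if present in your version, may be named differently):

```rust
use rust_rocksdb::{Options, DB};

fn dump_statistics() -> Result<(), Box<dyn std::error::Error>> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.enable_statistics(); // opt in to ticker/histogram collection

    let db = DB::open(&opts, "/tmp/stats-demo")?;
    db.put(b"key", b"value")?;
    let _ = db.get(b"key")?;

    // The full statistics report; tickers such as
    // "rocksdb.bytes.compressed.from" appear as individual lines.
    if let Some(report) = opts.get_statistics() {
        println!("{report}");
    }
    Ok(())
}
```

The `iterable_named_enum!` wrapper above is what pairs each `Ticker` variant with its `rocksdb.`-prefixed name string, so variants can be enumerated and matched against lines of that report.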

