diff --git a/java/org/rocksdb/Options.java b/java/org/rocksdb/Options.java index ff289b77684c08cbef87f74650982fd67024e060..0c5e401b9cea622b35d02e779a703fa3db68bc1e 100644 --- a/java/org/rocksdb/Options.java +++ b/java/org/rocksdb/Options.java @@ -1199,6 +1199,1054 @@ public class Options { return this; } +/////////////////////////////////////////////////////////////////////// + /** + * Number of keys between restart points for delta encoding of keys. + * This parameter can be changed dynamically. Most clients should + * leave this parameter alone. + * Default: 16 + * + * @return the number of keys between restart points. + */ + public int blockRestartInterval() { + return blockRestartInterval(nativeHandle_); + } + private native int blockRestartInterval(long handle); + + /** + * Number of keys between restart points for delta encoding of keys. + * This parameter can be changed dynamically. Most clients should + * leave this parameter alone. + * Default: 16 + * + * @param blockRestartInterval the number of keys between restart points. + * @return the reference to the current option. + */ + public Options setBlockRestartInterval(int blockRestartInterval) { + setBlockRestartInterval(nativeHandle_, blockRestartInterval); + return this; + } + private native void setBlockRestartInterval( + long handle, int blockRestartInterval); + + /** + * If true, place whole keys in the filter (not just prefixes). + * This must generally be true for gets to be efficient. + * Default: true + * + * @return if true, then whole-key-filtering is on. + */ + public boolean wholeKeyFiltering() { + return wholeKeyFiltering(nativeHandle_); + } + private native boolean wholeKeyFiltering(long handle); + + /** + * If true, place whole keys in the filter (not just prefixes). + * This must generally be true for gets to be efficient. + * Default: true + * + * @param wholeKeyFiltering if true, then whole-key-filtering is on. + * @return the reference to the current option. + */ + public Options setWholeKeyFiltering(boolean wholeKeyFiltering) { + setWholeKeyFiltering(nativeHandle_, wholeKeyFiltering); + return this; + } + private native void setWholeKeyFiltering( + long handle, boolean wholeKeyFiltering); + + /** + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @return the number of levels. + */ + public int numLevels() { + return numLevels(nativeHandle_); + } + private native int numLevels(long handle); + + /** + * Set the number of levels for this database. + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @param numLevels the number of levels. + * @return the reference to the current option. + */ + public Options setNumLevels(int numLevels) { + setNumLevels(nativeHandle_, numLevels); + return this; + } + private native void setNumLevels( + long handle, int numLevels); + + /** + * The number of files in level-0 to trigger compaction from level-0 to + * level-1. A value < 0 means that level-0 compaction will not be + * triggered by the number of files at all. + * Default: 4 + * + * @return the number of files in level-0 to trigger compaction. + */ + public int levelZeroFileNumCompactionTrigger() { + return levelZeroFileNumCompactionTrigger(nativeHandle_); + } + private native int levelZeroFileNumCompactionTrigger(long handle); + + /** + * Number of files to trigger level-0 compaction. A value < 0 means that + * level-0 compaction will not be triggered by the number of files at all. 
+ * Default: 4 + * + * @param numFiles the number of files in level-0 to trigger compaction. + * @return the reference to the current option. + */ + public Options setLevelZeroFileNumCompactionTrigger( + int numFiles) { + setLevelZeroFileNumCompactionTrigger( + nativeHandle_, numFiles); + return this; + } + private native void setLevelZeroFileNumCompactionTrigger( + long handle, int numFiles); + + /** + * Soft limit on the number of level-0 files. We start slowing down writes + * at this point. A value < 0 means that no write slowdown will be + * triggered by the number of files in level-0. + * + * @return the soft limit on the number of level-0 files. + */ + public int levelZeroSlowdownWritesTrigger() { + return levelZeroSlowdownWritesTrigger(nativeHandle_); + } + private native int levelZeroSlowdownWritesTrigger(long handle); + + /** + * Soft limit on the number of level-0 files. We start slowing down writes at this + * point. A value < 0 means that no write slowdown will be triggered by + * the number of files in level-0. + * + * @param numFiles soft limit on the number of level-0 files. + * @return the reference to the current option. + */ + public Options setLevelZeroSlowdownWritesTrigger( + int numFiles) { + setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles); + return this; + } + private native void setLevelZeroSlowdownWritesTrigger( + long handle, int numFiles); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @return the hard limit of the number of level-0 files. + */ + public int levelZeroStopWritesTrigger() { + return levelZeroStopWritesTrigger(nativeHandle_); + } + private native int levelZeroStopWritesTrigger(long handle); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @param numFiles the hard limit of the number of level-0 files. + * @return the reference to the current option. + */ + public Options setLevelZeroStopWritesTrigger(int numFiles) { + setLevelZeroStopWritesTrigger(nativeHandle_, numFiles); + return this; + } + private native void setLevelZeroStopWritesTrigger( + long handle, int numFiles); + + /** + * The highest level to which a new compacted memtable is pushed if it + * does not create overlap. We try to push to level 2 to avoid the + * relatively expensive level 0=>1 compactions and to avoid some + * expensive manifest file operations. We do not push all the way to + * the largest level since that can generate a lot of wasted disk + * space if the same key space is being repeatedly overwritten. + * + * @return the highest level where a new compacted memtable will be pushed. + */ + public int maxMemCompactionLevel() { + return maxMemCompactionLevel(nativeHandle_); + } + private native int maxMemCompactionLevel(long handle); + + /** + * The highest level to which a new compacted memtable is pushed if it + * does not create overlap. We try to push to level 2 to avoid the + * relatively expensive level 0=>1 compactions and to avoid some + * expensive manifest file operations. We do not push all the way to + * the largest level since that can generate a lot of wasted disk + * space if the same key space is being repeatedly overwritten. + * + * @param maxMemCompactionLevel the highest level to which a new compacted + * mem-table will be pushed. + * @return the reference to the current option. 
+ */ + public Options setMaxMemCompactionLevel(int maxMemCompactionLevel) { + setMaxMemCompactionLevel(nativeHandle_, maxMemCompactionLevel); + return this; + } + private native void setMaxMemCompactionLevel( + long handle, int maxMemCompactionLevel); + + /** + * The target file size for compaction. + * This targetFileSizeBase determines a level-1 file size. + * Target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * targetFileSizeMultiplier is 10, then each file on level-1 will + * be 2MB, and each file on level-2 will be 20MB, + * and each file on level-3 will be 200MB. + * By default targetFileSizeBase is 2MB. + * + * @return the target size of a level-1 file. + * + * @see targetFileSizeMultiplier() + */ + public int targetFileSizeBase() { + return targetFileSizeBase(nativeHandle_); + } + private native int targetFileSizeBase(long handle); + + /** + * The target file size for compaction. + * This targetFileSizeBase determines a level-1 file size. + * Target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * targetFileSizeMultiplier is 10, then each file on level-1 will + * be 2MB, and each file on level-2 will be 20MB, + * and each file on level-3 will be 200MB. + * By default targetFileSizeBase is 2MB. + * + * @param targetFileSizeBase the target size of a level-1 file. + * @return the reference to the current option. + * + * @see setTargetFileSizeMultiplier() + */ + public Options setTargetFileSizeBase(int targetFileSizeBase) { + setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); + return this; + } + private native void setTargetFileSizeBase( + long handle, int targetFileSizeBase); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-(L+1) file and a level-L file. + * By default targetFileSizeMultiplier is 1, meaning + * files in different levels have the same target. + * + * @return the size ratio between a level-(L+1) file and a level-L file. + */ + public int targetFileSizeMultiplier() { + return targetFileSizeMultiplier(nativeHandle_); + } + private native int targetFileSizeMultiplier(long handle); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-(L+1) file and a level-L file. + * By default targetFileSizeMultiplier is 1, meaning + * files in different levels have the same target. + * + * @param multiplier the size ratio between a level-(L+1) file + * and a level-L file. + * @return the reference to the current option. + */ + public Options setTargetFileSizeMultiplier(int multiplier) { + setTargetFileSizeMultiplier(nativeHandle_, multiplier); + return this; + } + private native void setTargetFileSizeMultiplier( + long handle, int multiplier); + + /** + * The upper-bound of the total size of level-1 files in bytes. + * Maximum number of bytes for level L can be calculated as + * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1)) + * For example, if maxBytesForLevelBase is 20MB, and if + * maxBytesForLevelMultiplier is 10, total data size for level-1 + * will be 20MB, total file size for level-2 will be 200MB, + * and total file size for level-3 will be 2GB. + * By default maxBytesForLevelBase is 10MB. + * + * @return the upper-bound of the total size of level-1 files in bytes. 
+ * @see maxBytesForLevelMultiplier() + */ + public long maxBytesForLevelBase() { + return maxBytesForLevelBase(nativeHandle_); + } + private native long maxBytesForLevelBase(long handle); + + /** + * The upper-bound of the total size of level-1 files in bytes. + * Maximum number of bytes for level L can be calculated as + * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1)) + * For example, if maxBytesForLevelBase is 20MB, and if + * maxBytesForLevelMultiplier is 10, total data size for level-1 + * will be 20MB, total file size for level-2 will be 200MB, + * and total file size for level-3 will be 2GB. + * By default maxBytesForLevelBase is 10MB. + * + * @param maxBytesForLevelBase the upper-bound of the total size of + * level-1 files in bytes. + * @return the reference to the current option. + * @see setMaxBytesForLevelMultiplier() + */ + public Options setMaxBytesForLevelBase(long maxBytesForLevelBase) { + setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase); + return this; + } + private native void setMaxBytesForLevelBase( + long handle, long maxBytesForLevelBase); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @return the ratio between the total size of level-(L+1) files and + * the total size of level-L files for all L. + * @see maxBytesForLevelBase() + */ + public int maxBytesForLevelMultiplier() { + return maxBytesForLevelMultiplier(nativeHandle_); + } + private native int maxBytesForLevelMultiplier(long handle); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @param multiplier the ratio between the total size of level-(L+1) + * files and the total size of level-L files for all L. + * @return the reference to the current option. + * @see setMaxBytesForLevelBase() + */ + public Options setMaxBytesForLevelMultiplier(int multiplier) { + setMaxBytesForLevelMultiplier(nativeHandle_, multiplier); + return this; + } + private native void setMaxBytesForLevelMultiplier( + long handle, int multiplier); + + /** + * Maximum number of bytes in all compacted files. We avoid expanding + * the lower level file set of a compaction if it would make the + * total compaction cover more than + * (expanded_compaction_factor * targetFileSizeLevel()) many bytes. + * + * @return the maximum number of bytes in all compacted files. + * @see sourceCompactionFactor() + */ + public int expandedCompactionFactor() { + return expandedCompactionFactor(nativeHandle_); + } + private native int expandedCompactionFactor(long handle); + + /** + * Maximum number of bytes in all compacted files. We avoid expanding + * the lower level file set of a compaction if it would make the + * total compaction cover more than + * (expanded_compaction_factor * targetFileSizeLevel()) many bytes. + * + * @param expandedCompactionFactor the maximum number of bytes in all + * compacted files. + * @return the reference to the current option. + * @see setSourceCompactionFactor() + */ + public Options setExpandedCompactionFactor(int expandedCompactionFactor) { + setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor); + return this; + } + private native void setExpandedCompactionFactor( + long handle, int expandedCompactionFactor); + + /** + * Maximum number of bytes in all source files to be compacted in a + * single compaction run. 
We avoid picking too many files in the + * source level so that the total source bytes for compaction + * do not exceed + * (source_compaction_factor * targetFileSizeLevel()) many bytes. + * Default: 1, i.e. pick maxfilesize amount of data as the source of + * a compaction. + * + * @return the maximum number of bytes in all source files to be compacted. + * @see expandedCompactionFactor() + */ + public int sourceCompactionFactor() { + return sourceCompactionFactor(nativeHandle_); + } + private native int sourceCompactionFactor(long handle); + + /** + * Maximum number of bytes in all source files to be compacted in a + * single compaction run. We avoid picking too many files in the + * source level so that the total source bytes for compaction + * do not exceed + * (source_compaction_factor * targetFileSizeLevel()) many bytes. + * Default: 1, i.e. pick maxfilesize amount of data as the source of + * a compaction. + * + * @param sourceCompactionFactor the maximum number of bytes in all + * source files to be compacted in a single compaction run. + * @return the reference to the current option. + * @see setExpandedCompactionFactor() + */ + public Options setSourceCompactionFactor(int sourceCompactionFactor) { + setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor); + return this; + } + private native void setSourceCompactionFactor( + long handle, int sourceCompactionFactor); + + /** + * Control maximum bytes of overlaps in grandparent (i.e., level+2) before we + * stop building a single file in a level->level+1 compaction. + * + * @return maximum bytes of overlaps in "grandparent" level. + */ + public int maxGrandparentOverlapFactor() { + return maxGrandparentOverlapFactor(nativeHandle_); + } + private native int maxGrandparentOverlapFactor(long handle); + + /** + * Control maximum bytes of overlaps in grandparent (i.e., level+2) before we + * stop building a single file in a level->level+1 compaction. + * + * @param maxGrandparentOverlapFactor maximum bytes of overlaps in + * "grandparent" level. + * @return the reference to the current option. + */ + public Options setMaxGrandparentOverlapFactor( + int maxGrandparentOverlapFactor) { + setMaxGrandparentOverlapFactor(nativeHandle_, maxGrandparentOverlapFactor); + return this; + } + private native void setMaxGrandparentOverlapFactor( + long handle, int maxGrandparentOverlapFactor); + + /** + * Puts are delayed 0-1 ms when any level has a compaction score that exceeds + * soft_rate_limit. This is ignored when == 0.0. + * CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not + * hold, RocksDB will set soft_rate_limit = hard_rate_limit. + * Default: 0 (disabled) + * + * @return soft-rate-limit for put delay. + */ + public double softRateLimit() { + return softRateLimit(nativeHandle_); + } + private native double softRateLimit(long handle); + + /** + * Puts are delayed 0-1 ms when any level has a compaction score that exceeds + * soft_rate_limit. This is ignored when == 0.0. + * CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not + * hold, RocksDB will set soft_rate_limit = hard_rate_limit. + * Default: 0 (disabled) + * + * @param softRateLimit the soft-rate-limit of a compaction score + * for put delay. + * @return the reference to the current option. 
+ */ + public Options setSoftRateLimit(double softRateLimit) { + setSoftRateLimit(nativeHandle_, softRateLimit); + return this; + } + private native void setSoftRateLimit( + long handle, double softRateLimit); + + /** + * Puts are delayed 1ms at a time when any level has a compaction score that + * exceeds hard_rate_limit. This is ignored when <= 1.0. + * Default: 0 (disabled) + * + * @return the hard-rate-limit of a compaction score for put delay. + */ + public double hardRateLimit() { + return hardRateLimit(nativeHandle_); + } + private native double hardRateLimit(long handle); + + /** + * Puts are delayed 1ms at a time when any level has a compaction score that + * exceeds hard_rate_limit. This is ignored when <= 1.0. + * Default: 0 (disabled) + * + * @param hardRateLimit the hard-rate-limit of a compaction score for put + * delay. + * @return the reference to the current option. + */ + public Options setHardRateLimit(double hardRateLimit) { + setHardRateLimit(nativeHandle_, hardRateLimit); + return this; + } + private native void setHardRateLimit( + long handle, double hardRateLimit); + + /** + * The maximum time interval a put will be stalled when hard_rate_limit + * is enforced. If 0, then there is no limit. + * Default: 1000 + * + * @return the maximum time interval a put will be stalled when + * hard_rate_limit is enforced. + */ + public int rateLimitDelayMaxMilliseconds() { + return rateLimitDelayMaxMilliseconds(nativeHandle_); + } + private native int rateLimitDelayMaxMilliseconds(long handle); + + /** + * The maximum time interval a put will be stalled when hard_rate_limit + * is enforced. If 0, then there is no limit. + * Default: 1000 + * + * @param rateLimitDelayMaxMilliseconds the maximum time interval a put + * will be stalled. + * @return the reference to the current option. + */ + public Options setRateLimitDelayMaxMilliseconds( + int rateLimitDelayMaxMilliseconds) { + setRateLimitDelayMaxMilliseconds( + nativeHandle_, rateLimitDelayMaxMilliseconds); + return this; + } + private native void setRateLimitDelayMaxMilliseconds( + long handle, int rateLimitDelayMaxMilliseconds); + + /** + * Disable block cache. If this is set to true, + * then no block cache should be used, and the block_cache should + * point to a nullptr object. + * Default: false + * + * @return true if block cache is disabled. + */ + public boolean noBlockCache() { + return noBlockCache(nativeHandle_); + } + private native boolean noBlockCache(long handle); + + /** + * Disable block cache. If this is set to true, + * then no block cache should be used, and the block_cache should + * point to a nullptr object. + * Default: false + * + * @param noBlockCache true if block-cache is disabled. + * @return the reference to the current option. + */ + public Options setNoBlockCache(boolean noBlockCache) { + setNoBlockCache(nativeHandle_, noBlockCache); + return this; + } + private native void setNoBlockCache( + long handle, boolean noBlockCache); + + /** + * The size of one block in arena memory allocation. + * If <= 0, a proper value is automatically calculated (usually 1/10 of + * writeBufferSize). + * + * There are two additional restrictions on the specified size: + * (1) the size should be in the range of [4096, 2 << 30], and + * (2) it should be a multiple of the CPU word size (which helps with + * memory alignment). + * + * We'll automatically check and adjust the size to make sure it + * conforms to these restrictions. 
+ * Default: 0 + * + * @return the size of an arena block + */ + public long arenaBlockSize() { + return arenaBlockSize(nativeHandle_); + } + private native long arenaBlockSize(long handle); + + /** + * The size of one block in arena memory allocation. + * If <= 0, a proper value is automatically calculated (usually 1/10 of + * writeBufferSize). + * + * There are two additional restrictions on the specified size: + * (1) the size should be in the range of [4096, 2 << 30], and + * (2) it should be a multiple of the CPU word size (which helps with + * memory alignment). + * + * We'll automatically check and adjust the size to make sure it + * conforms to these restrictions. + * Default: 0 + * + * @param arenaBlockSize the size of an arena block + * @return the reference to the current option. + */ + public Options setArenaBlockSize(long arenaBlockSize) { + setArenaBlockSize(nativeHandle_, arenaBlockSize); + return this; + } + private native void setArenaBlockSize( + long handle, long arenaBlockSize); + + /** + * Disable automatic compactions. Manual compactions can still + * be issued on this column family. + * + * @return true if auto-compactions are disabled. + */ + public boolean disableAutoCompactions() { + return disableAutoCompactions(nativeHandle_); + } + private native boolean disableAutoCompactions(long handle); + + /** + * Disable automatic compactions. Manual compactions can still + * be issued on this column family. + * + * @param disableAutoCompactions true if auto-compactions are disabled. + * @return the reference to the current option. + */ + public Options setDisableAutoCompactions(boolean disableAutoCompactions) { + setDisableAutoCompactions(nativeHandle_, disableAutoCompactions); + return this; + } + private native void setDisableAutoCompactions( + long handle, boolean disableAutoCompactions); + + /** + * Purge duplicate/deleted keys when a memtable is flushed to storage. + * Default: true + * + * @return true if purging keys is enabled. + */ + public boolean purgeRedundantKvsWhileFlush() { + return purgeRedundantKvsWhileFlush(nativeHandle_); + } + private native boolean purgeRedundantKvsWhileFlush(long handle); + + /** + * Purge duplicate/deleted keys when a memtable is flushed to storage. + * Default: true + * + * @param purgeRedundantKvsWhileFlush true if purging keys is enabled. + * @return the reference to the current option. + */ + public Options setPurgeRedundantKvsWhileFlush( + boolean purgeRedundantKvsWhileFlush) { + setPurgeRedundantKvsWhileFlush( + nativeHandle_, purgeRedundantKvsWhileFlush); + return this; + } + private native void setPurgeRedundantKvsWhileFlush( + long handle, boolean purgeRedundantKvsWhileFlush); + + /** + * This is used to close a block before it reaches the configured + * 'block_size'. If the percentage of free space in the current block is less + * than this specified number and adding a new record to the block will + * exceed the configured block size, then this block will be closed and the + * new record will be written to the next block. + * Default is 10. + * + * @return the block size deviation. + */ + public int blockSizeDeviation() { + return blockSizeDeviation(nativeHandle_); + } + private native int blockSizeDeviation(long handle); + + /** + * This is used to close a block before it reaches the configured + * 'block_size'. 
If the percentage of free space in the current block is less + * than this specified number and adding a new record to the block will + * exceed the configured block size, then this block will be closed and the + * new record will be written to the next block. + * Default is 10. + * + * @param blockSizeDeviation the block size deviation. + * @return the reference to the current option. + */ + public Options setBlockSizeDeviation(int blockSizeDeviation) { + setBlockSizeDeviation(nativeHandle_, blockSizeDeviation); + return this; + } + private native void setBlockSizeDeviation( + long handle, int blockSizeDeviation); + + /** + * If true, compaction will verify checksum on every read that happens + * as part of compaction. + * Default: true + * + * @return true if compaction verifies checksum on every read. + */ + public boolean verifyChecksumsInCompaction() { + return verifyChecksumsInCompaction(nativeHandle_); + } + private native boolean verifyChecksumsInCompaction(long handle); + + /** + * If true, compaction will verify checksum on every read that happens + * as part of compaction. + * Default: true + * + * @param verifyChecksumsInCompaction true if compaction verifies + * checksum on every read. + * @return the reference to the current option. + */ + public Options setVerifyChecksumsInCompaction( + boolean verifyChecksumsInCompaction) { + setVerifyChecksumsInCompaction( + nativeHandle_, verifyChecksumsInCompaction); + return this; + } + private native void setVerifyChecksumsInCompaction( + long handle, boolean verifyChecksumsInCompaction); + + /** + * Use KeyMayExist API to filter deletes when this is true. + * If KeyMayExist returns false, i.e. the key definitely does not exist, then + * the delete is a noop. KeyMayExist only incurs an in-memory lookup. + * This optimization avoids writing the delete to storage when appropriate. + * Default: false + * + * @return true if filter-deletes behavior is on. + */ + public boolean filterDeletes() { + return filterDeletes(nativeHandle_); + } + private native boolean filterDeletes(long handle); + + /** + * Use KeyMayExist API to filter deletes when this is true. + * If KeyMayExist returns false, i.e. the key definitely does not exist, then + * the delete is a noop. KeyMayExist only incurs an in-memory lookup. + * This optimization avoids writing the delete to storage when appropriate. + * Default: false + * + * @param filterDeletes true if filter-deletes behavior is on. + * @return the reference to the current option. + */ + public Options setFilterDeletes(boolean filterDeletes) { + setFilterDeletes(nativeHandle_, filterDeletes); + return this; + } + private native void setFilterDeletes( + long handle, boolean filterDeletes); + + /** + * An iterator's Next() sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same user key) that will be sequentially + * skipped before a reseek is issued. + * Default: 8 + * + * @return the number of keys that can be skipped in an iteration. + */ + public long maxSequentialSkipInIterations() { + return maxSequentialSkipInIterations(nativeHandle_); + } + private native long maxSequentialSkipInIterations(long handle); + + /** + * An iterator's Next() sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same user key) that will be sequentially + * skipped before a reseek is issued. 
+ * Default: 8 + * + * @param maxSequentialSkipInIterations the number of keys that can + * be skipped in an iteration. + * @return the reference to the current option. + */ + public Options setMaxSequentialSkipInIterations(long maxSequentialSkipInIterations) { + setMaxSequentialSkipInIterations(nativeHandle_, maxSequentialSkipInIterations); + return this; + } + private native void setMaxSequentialSkipInIterations( + long handle, long maxSequentialSkipInIterations); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update the existing_value in place iff + * * key exists in current memtable + * * new sizeof(new_value) <= sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @return true if thread-safe inplace updates are allowed. + */ + public boolean inplaceUpdateSupport() { + return inplaceUpdateSupport(nativeHandle_); + } + private native boolean inplaceUpdateSupport(long handle); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update the existing_value in place iff + * * key exists in current memtable + * * new sizeof(new_value) <= sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @param inplaceUpdateSupport true if thread-safe inplace updates + * are allowed. + * @return the reference to the current option. + */ + public Options setInplaceUpdateSupport(boolean inplaceUpdateSupport) { + setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport); + return this; + } + private native void setInplaceUpdateSupport( + long handle, boolean inplaceUpdateSupport); + + /** + * Number of locks used for inplace updates. + * Default: 10000, if inplace_update_support = true, else 0. + * + * @return the number of locks used for inplace updates. + */ + public long inplaceUpdateNumLocks() { + return inplaceUpdateNumLocks(nativeHandle_); + } + private native long inplaceUpdateNumLocks(long handle); + + /** + * Number of locks used for inplace updates. + * Default: 10000, if inplace_update_support = true, else 0. + * + * @param inplaceUpdateNumLocks the number of locks used for + * inplace updates. + * @return the reference to the current option. + */ + public Options setInplaceUpdateNumLocks(long inplaceUpdateNumLocks) { + setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks); + return this; + } + private native void setInplaceUpdateNumLocks( + long handle, long inplaceUpdateNumLocks); + + /** + * Returns the number of bits used in the prefix bloom filter. + * + * This value will be used only when a prefix-extractor is specified. + * + * @return the number of bloom-bits. + * @see useFixedLengthPrefixExtractor() + */ + public int memtablePrefixBloomBits() { + return memtablePrefixBloomBits(nativeHandle_); + } + private native int memtablePrefixBloomBits(long handle); + + /** + * Sets the number of bits used in the prefix bloom filter. + * + * This value will be used only when a prefix-extractor is specified. + * + * @param memtablePrefixBloomBits the number of bits used in the + * prefix bloom filter. + * @return the reference to the current option. 
+ */ + public Options setMemtablePrefixBloomBits(int memtablePrefixBloomBits) { + setMemtablePrefixBloomBits(nativeHandle_, memtablePrefixBloomBits); + return this; + } + private native void setMemtablePrefixBloomBits( + long handle, int memtablePrefixBloomBits); + + /** + * The number of hash probes per key used in the mem-table. + * + * @return the number of hash probes per key. + */ + public int memtablePrefixBloomProbes() { + return memtablePrefixBloomProbes(nativeHandle_); + } + private native int memtablePrefixBloomProbes(long handle); + + /** + * The number of hash probes per key used in the mem-table. + * + * @param memtablePrefixBloomProbes the number of hash probes per key. + * @return the reference to the current option. + */ + public Options setMemtablePrefixBloomProbes(int memtablePrefixBloomProbes) { + setMemtablePrefixBloomProbes(nativeHandle_, memtablePrefixBloomProbes); + return this; + } + private native void setMemtablePrefixBloomProbes( + long handle, int memtablePrefixBloomProbes); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than the number of probes. This option can boost performance + * for in-memory workloads but should be used with care since it can cause + * a higher false positive rate. + * Default: 0 + * + * @return the level of locality of bloom-filter probes. + * @see setMemtablePrefixBloomProbes + */ + public int bloomLocality() { + return bloomLocality(nativeHandle_); + } + private native int bloomLocality(long handle); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than the number of probes. This option can boost performance + * for in-memory workloads but should be used with care since it can cause + * a higher false positive rate. + * Default: 0 + * + * @param bloomLocality the level of locality of bloom-filter probes. + * @return the reference to the current option. + */ + public Options setBloomLocality(int bloomLocality) { + setBloomLocality(nativeHandle_, bloomLocality); + return this; + } + private native void setBloomLocality( + long handle, int bloomLocality); + + /** + * Maximum number of successive merge operations on a key in the memtable. + * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @return the maximum number of successive merges. + */ + public long maxSuccessiveMerges() { + return maxSuccessiveMerges(nativeHandle_); + } + private native long maxSuccessiveMerges(long handle); + + /** + * Maximum number of successive merge operations on a key in the memtable. 
+ * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @param maxSuccessiveMerges the maximum number of successive merges. + * @return the reference to the current option. + */ + public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) { + setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges); + return this; + } + private native void setMaxSuccessiveMerges( + long handle, long maxSuccessiveMerges); + + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check all of these + * files. Also, an in-memory merge may result in writing less + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @return the minimum number of write buffers that will be merged together. + */ + public int minWriteBufferNumberToMerge() { + return minWriteBufferNumberToMerge(nativeHandle_); + } + private native int minWriteBufferNumberToMerge(long handle); + + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check all of these + * files. Also, an in-memory merge may result in writing less + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @param minWriteBufferNumberToMerge the minimum number of write buffers + * that will be merged together. + * @return the reference to the current option. + */ + public Options setMinWriteBufferNumberToMerge(int minWriteBufferNumberToMerge) { + setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge); + return this; + } + private native void setMinWriteBufferNumberToMerge( + long handle, int minWriteBufferNumberToMerge); + + /** + * The number of partial merge operands to accumulate before partial + * merge will be performed. Partial merge will not be called + * if the number of values to merge is less than min_partial_merge_operands. + * + * If min_partial_merge_operands < 2, then it will be treated as 2. + * + * Default: 2 + * + * @return the number of partial merge operands to accumulate. + */ + public int minPartialMergeOperands() { + return minPartialMergeOperands(nativeHandle_); + } + private native int minPartialMergeOperands(long handle); + + /** + * The number of partial merge operands to accumulate before partial + * merge will be performed. Partial merge will not be called + * if the number of values to merge is less than min_partial_merge_operands. + * + * If min_partial_merge_operands < 2, then it will be treated as 2. + * + * Default: 2 + * + * @param minPartialMergeOperands the number of partial merge operands + * to accumulate. + * @return the reference to the current option. 
+ */ + public Options setMinPartialMergeOperands(int minPartialMergeOperands) { + setMinPartialMergeOperands(nativeHandle_, minPartialMergeOperands); + return this; + } + private native void setMinPartialMergeOperands( + long handle, int minPartialMergeOperands); + /** * Release the memory allocated for the current instance * in the c++ side. diff --git a/java/org/rocksdb/test/OptionsTest.java b/java/org/rocksdb/test/OptionsTest.java index 7c13db6d18aee268f695e008d73004b09eb7fcba..5971d96a596b9f6abdb7320f531610613978684a 100644 --- a/java/org/rocksdb/test/OptionsTest.java +++ b/java/org/rocksdb/test/OptionsTest.java @@ -195,6 +195,228 @@ public class OptionsTest { assert(opt.allowThreadLocal() == boolValue); } + { // WriteBufferSize test + long longValue = rand.nextLong(); + opt.setWriteBufferSize(longValue); + assert(opt.writeBufferSize() == longValue); + } + + { // MaxWriteBufferNumber test + int intValue = rand.nextInt(); + opt.setMaxWriteBufferNumber(intValue); + assert(opt.maxWriteBufferNumber() == intValue); + } + + { // MinWriteBufferNumberToMerge test + int intValue = rand.nextInt(); + opt.setMinWriteBufferNumberToMerge(intValue); + assert(opt.minWriteBufferNumberToMerge() == intValue); + } + + { // BlockSize test + long longValue = rand.nextLong(); + opt.setBlockSize(longValue); + assert(opt.blockSize() == longValue); + } + + { // BlockRestartInterval test + int intValue = rand.nextInt(); + opt.setBlockRestartInterval(intValue); + assert(opt.blockRestartInterval() == intValue); + } + + { // WholeKeyFiltering test + boolean boolValue = rand.nextBoolean(); + opt.setWholeKeyFiltering(boolValue); + assert(opt.wholeKeyFiltering() == boolValue); + } + + { // NumLevels test + int intValue = rand.nextInt(); + opt.setNumLevels(intValue); + assert(opt.numLevels() == intValue); + } + + { // LevelFileNumCompactionTrigger test + int intValue = rand.nextInt(); + opt.setLevelZeroFileNumCompactionTrigger(intValue); + assert(opt.levelZeroFileNumCompactionTrigger() == intValue); + } + + { // LevelSlowdownWritesTrigger test + int intValue = rand.nextInt(); + opt.setLevelZeroSlowdownWritesTrigger(intValue); + assert(opt.levelZeroSlowdownWritesTrigger() == intValue); + } + + { // LevelStopWritesTrigger test + int intValue = rand.nextInt(); + opt.setLevelZeroStopWritesTrigger(intValue); + assert(opt.levelZeroStopWritesTrigger() == intValue); + } + + { // MaxMemCompactionLevel test + int intValue = rand.nextInt(); + opt.setMaxMemCompactionLevel(intValue); + assert(opt.maxMemCompactionLevel() == intValue); + } + + { // TargetFileSizeBase test + int intValue = rand.nextInt(); + opt.setTargetFileSizeBase(intValue); + assert(opt.targetFileSizeBase() == intValue); + } + + { // TargetFileSizeMultiplier test + int intValue = rand.nextInt(); + opt.setTargetFileSizeMultiplier(intValue); + assert(opt.targetFileSizeMultiplier() == intValue); + } + + { // MaxBytesForLevelBase test + long longValue = rand.nextLong(); + opt.setMaxBytesForLevelBase(longValue); + assert(opt.maxBytesForLevelBase() == longValue); + } + + { // MaxBytesForLevelMultiplier test + int intValue = rand.nextInt(); + opt.setMaxBytesForLevelMultiplier(intValue); + assert(opt.maxBytesForLevelMultiplier() == intValue); + } + + { // ExpandedCompactionFactor test + int intValue = rand.nextInt(); + opt.setExpandedCompactionFactor(intValue); + assert(opt.expandedCompactionFactor() == intValue); + } + + { // SourceCompactionFactor test + int intValue = rand.nextInt(); + opt.setSourceCompactionFactor(intValue); + assert(opt.sourceCompactionFactor() == 
intValue); + } + + { // MaxGrandparentOverlapFactor test + int intValue = rand.nextInt(); + opt.setMaxGrandparentOverlapFactor(intValue); + assert(opt.maxGrandparentOverlapFactor() == intValue); + } + + { // DisableSeekCompaction test + boolean boolValue = rand.nextBoolean(); + opt.setDisableSeekCompaction(boolValue); + assert(opt.disableSeekCompaction() == boolValue); + } + + { // SoftRateLimit test + double doubleValue = rand.nextDouble(); + opt.setSoftRateLimit(doubleValue); + assert(opt.softRateLimit() == doubleValue); + } + + { // HardRateLimit test + double doubleValue = rand.nextDouble(); + opt.setHardRateLimit(doubleValue); + assert(opt.hardRateLimit() == doubleValue); + } + + { // RateLimitDelayMaxMilliseconds test + int intValue = rand.nextInt(); + opt.setRateLimitDelayMaxMilliseconds(intValue); + assert(opt.rateLimitDelayMaxMilliseconds() == intValue); + } + + { // NoBlockCache test + boolean boolValue = rand.nextBoolean(); + opt.setNoBlockCache(boolValue); + assert(opt.noBlockCache() == boolValue); + } + + { // ArenaBlockSize test + long longValue = rand.nextLong(); + opt.setArenaBlockSize(longValue); + assert(opt.arenaBlockSize() == longValue); + } + + { // DisableAutoCompactions test + boolean boolValue = rand.nextBoolean(); + opt.setDisableAutoCompactions(boolValue); + assert(opt.disableAutoCompactions() == boolValue); + } + + { // PurgeRedundantKvsWhileFlush test + boolean boolValue = rand.nextBoolean(); + opt.setPurgeRedundantKvsWhileFlush(boolValue); + assert(opt.purgeRedundantKvsWhileFlush() == boolValue); + } + + { // BlockSizeDeviation test + int intValue = rand.nextInt(); + opt.setBlockSizeDeviation(intValue); + assert(opt.blockSizeDeviation() == intValue); + } + + { // VerifyChecksumsInCompaction test + boolean boolValue = rand.nextBoolean(); + opt.setVerifyChecksumsInCompaction(boolValue); + assert(opt.verifyChecksumsInCompaction() == boolValue); + } + + { // FilterDeletes test + boolean boolValue = rand.nextBoolean(); + opt.setFilterDeletes(boolValue); + assert(opt.filterDeletes() == boolValue); + } + + { // MaxSequentialSkipInIterations test + long longValue = rand.nextLong(); + opt.setMaxSequentialSkipInIterations(longValue); + assert(opt.maxSequentialSkipInIterations() == longValue); + } + + { // InplaceUpdateSupport test + boolean boolValue = rand.nextBoolean(); + opt.setInplaceUpdateSupport(boolValue); + assert(opt.inplaceUpdateSupport() == boolValue); + } + + { // InplaceUpdateNumLocks test + long longValue = rand.nextLong(); + opt.setInplaceUpdateNumLocks(longValue); + assert(opt.inplaceUpdateNumLocks() == longValue); + } + + { // MemtablePrefixBloomBits test + int intValue = rand.nextInt(); + opt.setMemtablePrefixBloomBits(intValue); + assert(opt.memtablePrefixBloomBits() == intValue); + } + + { // MemtablePrefixBloomProbes test + int intValue = rand.nextInt(); + opt.setMemtablePrefixBloomProbes(intValue); + assert(opt.memtablePrefixBloomProbes() == intValue); + } + + { // BloomLocality test + int intValue = rand.nextInt(); + opt.setBloomLocality(intValue); + assert(opt.bloomLocality() == intValue); + } + + { // MaxSuccessiveMerges test + long longValue = rand.nextLong(); + opt.setMaxSuccessiveMerges(longValue); + assert(opt.maxSuccessiveMerges() == longValue); + } + + { // MinPartialMergeOperands test + int intValue = rand.nextInt(); + opt.setMinPartialMergeOperands(intValue); + assert(opt.minPartialMergeOperands() == intValue); + } + opt.dispose(); System.out.println("Passed OptionsTest"); } diff --git a/java/rocksjni/options.cc b/java/rocksjni/options.cc 
index d9aef3d741f6bf1a1cb1767e3dd763021b2e3646..6cbe1c27fef134f5fe4b9a589e8b6a80f78f6e6a 100644 --- a/java/rocksjni/options.cc +++ b/java/rocksjni/options.cc @@ -870,6 +870,764 @@ jstring Java_org_rocksdb_Options_tableFactoryName( return env->NewStringUTF(tf->Name()); } + +/* + * Class: org_rocksdb_Options + * Method: minWriteBufferNumberToMerge + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->min_write_buffer_number_to_merge; +} + +/* + * Class: org_rocksdb_Options + * Method: setMinWriteBufferNumberToMerge + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jmin_write_buffer_number_to_merge) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->min_write_buffer_number_to_merge = + static_cast<int>(jmin_write_buffer_number_to_merge); +} + +/* + * Class: org_rocksdb_Options + * Method: blockRestartInterval + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_blockRestartInterval( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->block_restart_interval; +} + +/* + * Class: org_rocksdb_Options + * Method: setBlockRestartInterval + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setBlockRestartInterval( + JNIEnv* env, jobject jobj, jlong jhandle, jint jblock_restart_interval) { + reinterpret_cast<rocksdb::Options*>(jhandle)->block_restart_interval = + static_cast<int>(jblock_restart_interval); +} + +/* + * Class: org_rocksdb_Options + * Method: wholeKeyFiltering + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_wholeKeyFiltering( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->whole_key_filtering; +} + +/* + * Class: org_rocksdb_Options + * Method: setWholeKeyFiltering + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setWholeKeyFiltering( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jwhole_key_filtering) { + reinterpret_cast<rocksdb::Options*>(jhandle)->whole_key_filtering = + static_cast<bool>(jwhole_key_filtering); +} + +/* + * Class: org_rocksdb_Options + * Method: numLevels + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_numLevels( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels; +} + +/* + * Class: org_rocksdb_Options + * Method: setNumLevels + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setNumLevels( + JNIEnv* env, jobject jobj, jlong jhandle, jint jnum_levels) { + reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels = + static_cast<int>(jnum_levels); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroFileNumCompactionTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->level0_file_num_compaction_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelZeroFileNumCompactionTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jlevel0_file_num_compaction_trigger) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->level0_file_num_compaction_trigger = + static_cast<int>(jlevel0_file_num_compaction_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroSlowdownWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + 
jhandle)->level0_slowdown_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelZeroSlowdownWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jlevel0_slowdown_writes_trigger) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->level0_slowdown_writes_trigger = + static_cast<int>(jlevel0_slowdown_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroStopWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->level0_stop_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelZeroStopWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jlevel0_stop_writes_trigger) { + reinterpret_cast<rocksdb::Options*>(jhandle)->level0_stop_writes_trigger = + static_cast<int>(jlevel0_stop_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: maxMemCompactionLevel + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxMemCompactionLevel( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->max_mem_compaction_level; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxMemCompactionLevel + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxMemCompactionLevel( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jmax_mem_compaction_level) { + reinterpret_cast<rocksdb::Options*>(jhandle)->max_mem_compaction_level = + static_cast<int>(jmax_mem_compaction_level); +} + +/* + * Class: org_rocksdb_Options + * Method: targetFileSizeBase + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_targetFileSizeBase( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base; +} + +/* + * Class: org_rocksdb_Options + * Method: setTargetFileSizeBase + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setTargetFileSizeBase( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jtarget_file_size_base) { + reinterpret_cast<rocksdb::Options*>(jhandle)->target_file_size_base = + static_cast<int>(jtarget_file_size_base); +} + +/* + * Class: org_rocksdb_Options + * Method: targetFileSizeMultiplier + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_targetFileSizeMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->target_file_size_multiplier; +} + +/* + * Class: org_rocksdb_Options + * Method: setTargetFileSizeMultiplier + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setTargetFileSizeMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jtarget_file_size_multiplier) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->target_file_size_multiplier = + static_cast<int>(jtarget_file_size_multiplier); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBytesForLevelBase + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxBytesForLevelBase( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->max_bytes_for_level_base; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBytesForLevelBase + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxBytesForLevelBase( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jmax_bytes_for_level_base) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->max_bytes_for_level_base = + static_cast<uint64_t>(jmax_bytes_for_level_base); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBytesForLevelMultiplier + * Signature: (J)I + */ 
+jint Java_org_rocksdb_Options_maxBytesForLevelMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->max_bytes_for_level_multiplier; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBytesForLevelMultiplier + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jmax_bytes_for_level_multiplier) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->max_bytes_for_level_multiplier = + static_cast<int>(jmax_bytes_for_level_multiplier); +} + +/* + * Class: org_rocksdb_Options + * Method: expandedCompactionFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_expandedCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->expanded_compaction_factor; +} + +/* + * Class: org_rocksdb_Options + * Method: setExpandedCompactionFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setExpandedCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jexpanded_compaction_factor) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->expanded_compaction_factor = + static_cast<int>(jexpanded_compaction_factor); +} + +/* + * Class: org_rocksdb_Options + * Method: sourceCompactionFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_sourceCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->source_compaction_factor; +} + +/* + * Class: org_rocksdb_Options + * Method: setSourceCompactionFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setSourceCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jsource_compaction_factor) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->source_compaction_factor = + static_cast<int>(jsource_compaction_factor); +} + +/* + * Class: org_rocksdb_Options + * Method: maxGrandparentOverlapFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxGrandparentOverlapFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->max_grandparent_overlap_factor; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxGrandparentOverlapFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxGrandparentOverlapFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jmax_grandparent_overlap_factor) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->max_grandparent_overlap_factor = + static_cast<int>(jmax_grandparent_overlap_factor); +} + +/* + * Class: org_rocksdb_Options + * Method: softRateLimit + * Signature: (J)D + */ +jdouble Java_org_rocksdb_Options_softRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->soft_rate_limit; +} + +/* + * Class: org_rocksdb_Options + * Method: setSoftRateLimit + * Signature: (JD)V + */ +void Java_org_rocksdb_Options_setSoftRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jdouble jsoft_rate_limit) { + reinterpret_cast<rocksdb::Options*>(jhandle)->soft_rate_limit = + static_cast<double>(jsoft_rate_limit); +} + +/* + * Class: org_rocksdb_Options + * Method: hardRateLimit + * Signature: (J)D + */ +jdouble Java_org_rocksdb_Options_hardRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->hard_rate_limit; +} + +/* + * Class: org_rocksdb_Options + * Method: setHardRateLimit + * Signature: (JD)V + */ +void Java_org_rocksdb_Options_setHardRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jdouble jhard_rate_limit) { + reinterpret_cast<rocksdb::Options*>(jhandle)->hard_rate_limit = + static_cast<double>(jhard_rate_limit); +} + +/* + * Class: org_rocksdb_Options + * Method: 
rateLimitDelayMaxMilliseconds + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_rateLimitDelayMaxMilliseconds( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->rate_limit_delay_max_milliseconds; +} + +/* + * Class: org_rocksdb_Options + * Method: setRateLimitDelayMaxMilliseconds + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setRateLimitDelayMaxMilliseconds( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jrate_limit_delay_max_milliseconds) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->rate_limit_delay_max_milliseconds = + static_cast<unsigned int>(jrate_limit_delay_max_milliseconds); +} + +/* + * Class: org_rocksdb_Options + * Method: noBlockCache + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_noBlockCache( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->no_block_cache; +} + +/* + * Class: org_rocksdb_Options + * Method: setNoBlockCache + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setNoBlockCache( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jno_block_cache) { + reinterpret_cast<rocksdb::Options*>(jhandle)->no_block_cache = + static_cast<bool>(jno_block_cache); +} + +/* + * Class: org_rocksdb_Options + * Method: arenaBlockSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_arenaBlockSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setArenaBlockSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setArenaBlockSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jarena_block_size) { + reinterpret_cast<rocksdb::Options*>(jhandle)->arena_block_size = + static_cast<size_t>(jarena_block_size); +} + +/* + * Class: org_rocksdb_Options + * Method: disableAutoCompactions + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_disableAutoCompactions( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->disable_auto_compactions; +} + +/* + * Class: org_rocksdb_Options + * Method: setDisableAutoCompactions + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setDisableAutoCompactions( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jdisable_auto_compactions) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->disable_auto_compactions = + static_cast<bool>(jdisable_auto_compactions); +} + +/* + * Class: org_rocksdb_Options + * Method: purgeRedundantKvsWhileFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_purgeRedundantKvsWhileFlush( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>( + jhandle)->purge_redundant_kvs_while_flush; +} + +/* + * Class: org_rocksdb_Options + * Method: setPurgeRedundantKvsWhileFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setPurgeRedundantKvsWhileFlush( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jpurge_redundant_kvs_while_flush) { + reinterpret_cast<rocksdb::Options*>( + jhandle)->purge_redundant_kvs_while_flush = + static_cast<bool>(jpurge_redundant_kvs_while_flush); +} + +/* + * Class: org_rocksdb_Options + * Method: blockSizeDeviation + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_blockSizeDeviation( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast<rocksdb::Options*>(jhandle)->block_size_deviation; +} + +/* + * Class: org_rocksdb_Options + * Method: setBlockSizeDeviation + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setBlockSizeDeviation( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jblock_size_deviation) { + reinterpret_cast<rocksdb::Options*>(jhandle)->block_size_deviation = + static_cast<int>(jblock_size_deviation); +} + +/* + * 
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    verifyChecksumsInCompaction
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_verifyChecksumsInCompaction(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->verify_checksums_in_compaction;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setVerifyChecksumsInCompaction
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setVerifyChecksumsInCompaction(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jverify_checksums_in_compaction) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->verify_checksums_in_compaction =
+          static_cast<bool>(jverify_checksums_in_compaction);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    filterDeletes
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_filterDeletes(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->filter_deletes;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setFilterDeletes
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setFilterDeletes(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean jfilter_deletes) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->filter_deletes =
+      static_cast<bool>(jfilter_deletes);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    maxSequentialSkipInIterations
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->max_sequential_skip_in_iterations;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMaxSequentialSkipInIterations
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jmax_sequential_skip_in_iterations) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->max_sequential_skip_in_iterations =
+          static_cast<int64_t>(jmax_sequential_skip_in_iterations);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    inplaceUpdateSupport
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_inplaceUpdateSupport(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->inplace_update_support;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setInplaceUpdateSupport
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setInplaceUpdateSupport(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean jinplace_update_support) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->inplace_update_support =
+          static_cast<bool>(jinplace_update_support);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    inplaceUpdateNumLocks
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->inplace_update_num_locks;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setInplaceUpdateNumLocks
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setInplaceUpdateNumLocks(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jinplace_update_num_locks) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->inplace_update_num_locks =
+          static_cast<size_t>(jinplace_update_num_locks);
+}
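
// ---------------------------------------------------------------------------
// [Editorial aside; illustration only, not part of this patch.]
// The JNI signature letters fix the Java-side width, while the native field
// widths vary, which is why each setter carries an explicit static_cast.
// Assuming the usual field types in rocksdb::Options (the struct definition
// is not part of this diff), the mapping used in these bindings is:
//
//   Z (jboolean) <-> bool               e.g. inplace_update_support
//   I (jint)     <-> int / int32_t      e.g. block_size_deviation
//   J (jlong)    <-> size_t / int64_t   e.g. inplace_update_num_locks
//
// jlong is always 64 bits, so assigning it to a size_t field on a 32-bit
// build would silently truncate large values passed in from Java.
// ---------------------------------------------------------------------------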
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    memtablePrefixBloomBits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_memtablePrefixBloomBits(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->memtable_prefix_bloom_bits;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMemtablePrefixBloomBits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMemtablePrefixBloomBits(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmemtable_prefix_bloom_bits) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->memtable_prefix_bloom_bits =
+          static_cast<int32_t>(jmemtable_prefix_bloom_bits);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    memtablePrefixBloomProbes
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_memtablePrefixBloomProbes(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->memtable_prefix_bloom_probes;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMemtablePrefixBloomProbes
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMemtablePrefixBloomProbes(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmemtable_prefix_bloom_probes) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->memtable_prefix_bloom_probes =
+          static_cast<int32_t>(jmemtable_prefix_bloom_probes);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    bloomLocality
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_bloomLocality(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setBloomLocality
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setBloomLocality(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jbloom_locality) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->bloom_locality =
+      static_cast<int32_t>(jbloom_locality);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    maxSuccessiveMerges
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxSuccessiveMerges(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMaxSuccessiveMerges
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jmax_successive_merges) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges =
+      static_cast<size_t>(jmax_successive_merges);
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    minPartialMergeOperands
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_minPartialMergeOperands(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_partial_merge_operands;
+}
+
+/*
+ * Class:     org_rocksdb_Options
+ * Method:    setMinPartialMergeOperands
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMinPartialMergeOperands(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmin_partial_merge_operands) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_partial_merge_operands =
+          static_cast<int32_t>(jmin_partial_merge_operands);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// WriteOptions