Commit 35c968f3 authored by Igor Canadi

Merge pull request #122 from ankgup87/master

[Java] Add statistics JNI bindings
@@ -422,8 +422,8 @@ ldb: tools/ldb.o $(LIBOBJECTS)
# ---------------------------------------------------------------------------
# Jni stuff
# ---------------------------------------------------------------------------
JNI_NATIVE_SOURCES = ./java/rocksjni/*.cc
JAVA_INCLUDE = -I/usr/lib/jvm/java-openjdk/include/ -I/usr/lib/jvm/java-openjdk/include/linux
ROCKSDBJNILIB = ./java/librocksdbjni.so
......
NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions
NATIVE_JAVA_CLASSES = org.rocksdb.RocksDB org.rocksdb.Options org.rocksdb.WriteBatch org.rocksdb.WriteBatchInternal org.rocksdb.WriteBatchTest org.rocksdb.WriteOptions org.rocksdb.BackupableDB org.rocksdb.BackupableDBOptions org.rocksdb.Statistics
NATIVE_INCLUDE = ./include
ROCKSDB_JAR = rocksdbjni.jar
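The jar and shared library produced by these targets are consumed together: the jar supplies the org.rocksdb classes, and librocksdbjni.so supplies their native methods. A minimal, hedged loading sketch (the class name below is hypothetical and not part of this commit; the library name matches ROCKSDBJNILIB and is resolved via -Djava.library.path, as in the sample target below):

// Minimal sketch, not part of this commit: loading the native library built
// by the JNI targets above.
public class LoadRocksJni {
  static {
    // Maps to librocksdbjni.so on Linux, found via -Djava.library.path.
    System.loadLibrary("rocksdbjni");
  }

  public static void main(String[] args) {
    System.out.println("rocksdbjni native library loaded");
  }
}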
@@ -16,7 +16,7 @@ sample: java
javac -cp $(ROCKSDB_JAR) RocksDBSample.java
@rm -rf /tmp/rocksdbjni
@rm -rf /tmp/rocksdbjni_not_found
java -ea -Djava.library.path=.:../ -cp ".:./*" RocksDBSample /tmp/rocksdbjni
java -ea -Djava.library.path=.:../ -cp ".:./*" -Xcheck:jni RocksDBSample /tmp/rocksdbjni
@rm -rf /tmp/rocksdbjni
@rm -rf /tmp/rocksdbjni_not_found
......
@@ -34,11 +34,13 @@ public class RocksDBSample {
}
options.setCreateIfMissing(true)
.createStatistics()
.setWriteBufferSize(8 * SizeUnit.KB)
.setMaxWriteBufferNumber(3)
.setDisableSeekCompaction(true)
.setBlockSize(64 * SizeUnit.KB)
.setMaxBackgroundCompactions(10);
Statistics stats = options.statisticsPtr();
assert(options.createIfMissing() == true);
assert(options.writeBufferSize() == 8 * SizeUnit.KB);
@@ -120,13 +122,33 @@ public class RocksDBSample {
assert(new String(testValue).equals(
new String(enoughArray, 0, len)));
writeOpts.dispose();
try {
for (TickerType statsType : TickerType.values()) {
stats.getTickerCount(statsType);
}
System.out.println("getTickerCount() passed.");
} catch (Exception e) {
System.out.println("Failed in call to getTickerCount()");
assert(false); //Should never reach here.
}
try {
for (HistogramType histogramType : HistogramType.values()) {
HistogramData data = stats.geHistogramData(histogramType);
}
System.out.println("geHistogramData() passed.");
} catch (Exception e) {
System.out.println("Failed in call to geHistogramData()");
assert(false); //Should never reach here.
}
} catch (RocksDBException e) {
System.err.println(e);
}
if (db != null) {
db.close();
}
// be sure to dispose c++ pointer
// be sure to dispose c++ pointers
options.dispose();
}
}
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public class HistogramData {
private final double median_;
private final double percentile95_;
private final double percentile99_;
private final double average_;
private final double standardDeviation_;
public HistogramData(double median, double percentile95,
double percentile99, double average, double standardDeviation) {
median_ = median;
percentile95_ = percentile95;
percentile99_ = percentile99;
average_ = average;
standardDeviation_ = standardDeviation;
}
public double getMedian() {
return median_;
}
public double getPercentile95() {
return percentile95_;
}
public double getPercentile99() {
return percentile99_;
}
public double getAverage() {
return average_;
}
public double getStandardDeviation() {
return standardDeviation_;
}
}
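HistogramData is a plain value holder returned by Statistics.geHistogramData(). As a hedged illustration (the helper class below is hypothetical, not part of this commit), its getters can be folded into a one-line summary:

import org.rocksdb.HistogramData;

// Hypothetical helper, not part of this commit: formats a HistogramData
// into a readable one-line summary.
public class HistogramFormat {
  public static String summarize(HistogramData data) {
    return String.format(
        "median=%.2f p95=%.2f p99=%.2f avg=%.2f stddev=%.2f",
        data.getMedian(),
        data.getPercentile95(),
        data.getPercentile99(),
        data.getAverage(),
        data.getStandardDeviation());
  }
}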
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public enum HistogramType {
DB_GET(0),
DB_WRITE(1),
COMPACTION_TIME(2),
TABLE_SYNC_MICROS(3),
COMPACTION_OUTFILE_SYNC_MICROS(4),
WAL_FILE_SYNC_MICROS(5),
MANIFEST_FILE_SYNC_MICROS(6),
// TIME SPENT IN IO DURING TABLE OPEN
TABLE_OPEN_IO_MICROS(7),
DB_MULTIGET(8),
READ_BLOCK_COMPACTION_MICROS(9),
READ_BLOCK_GET_MICROS(10),
WRITE_RAW_BLOCK_MICROS(11),
STALL_L0_SLOWDOWN_COUNT(12),
STALL_MEMTABLE_COMPACTION_COUNT(13),
STALL_L0_NUM_FILES_COUNT(14),
HARD_RATE_LIMIT_DELAY_COUNT(15),
SOFT_RATE_LIMIT_DELAY_COUNT(16),
NUM_FILES_IN_SINGLE_COMPACTION(17);
private final int value_;
private HistogramType(int value) {
value_ = value;
}
public int getValue() {
return value_;
}
}
@@ -507,7 +507,39 @@ public class Options {
assert(isInitialized());
return maxBackgroundCompactions(nativeHandle_);
}
private native int maxBackgroundCompactions(long handle);
/**
* Creates a statistics object which collects metrics about database
* operations. Statistics objects should not be shared between DB instances
* as they do not use any locks to prevent concurrent updates.
*
* @return the instance of the current Options.
* @see RocksDB.open()
*/
public Options createStatistics() {
assert(isInitialized());
createStatistics(nativeHandle_);
return this;
}
/**
* Returns the statistics object. If the underlying C++ statistics pointer
* is NULL, createStatistics() is called first. A short usage sketch is
* shown after this Options.java diff.
*
* @return the instance of the statistics object.
* @see createStatistics()
*/
public Statistics statisticsPtr() {
assert(isInitialized());
long statsPtr = statisticsPtr(nativeHandle_);
if(statsPtr == 0) {
createStatistics();
statsPtr = statisticsPtr(nativeHandle_);
}
return new Statistics(statsPtr);
}
/**
* Specifies the maximum number of concurrent background compaction jobs,
@@ -528,8 +560,6 @@ public class Options {
setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions);
return this;
}
private native void setMaxBackgroundCompactions(
long handle, int maxBackgroundCompactions);
/**
* Returns the maximum number of concurrent background flush jobs.
@@ -1104,7 +1134,7 @@ public class Options {
* in the c++ side.
*/
public synchronized void dispose() {
if (nativeHandle_ != 0) {
if (isInitialized()) {
dispose0();
}
}
@@ -1131,6 +1161,11 @@ public class Options {
private native void setDisableSeekCompaction(
long handle, boolean disableSeekCompaction);
private native boolean disableSeekCompaction(long handle);
private native void setMaxBackgroundCompactions(
long handle, int maxBackgroundCompactions);
private native int maxBackgroundCompactions(long handle);
private native void createStatistics(long optHandle);
private native long statisticsPtr(long optHandle);
long nativeHandle_;
long cacheSize_;
......
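A hedged sketch of the new Options accessors (the class name is illustrative and not part of this commit), showing the lazy creation described in the statisticsPtr() javadoc above:

import org.rocksdb.Options;
import org.rocksdb.Statistics;

public class OptionsStatisticsSketch {
  public static void main(String[] args) {
    Options options = new Options();
    try {
      // statisticsPtr() creates the statistics object on demand when
      // createStatistics() has not been called yet.
      Statistics stats = options.statisticsPtr();
      assert stats != null;

      // Explicit creation returns the Options instance, so it can also be
      // used inside a builder-style chain as in RocksDBSample above.
      options.createStatistics();
    } finally {
      // be sure to dispose the c++ pointer
      options.dispose();
    }
  }
}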
@@ -5,7 +5,6 @@
package org.rocksdb;
import java.lang.*;
import java.util.*;
import java.io.Closeable;
import java.io.IOException;
......
@@ -5,7 +5,6 @@
package org.rocksdb;
import java.lang.*;
import java.util.*;
/**
......
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
/**
* Statistics to analyze the performance of a db. The pointer to the underlying
* statistics object is managed by the Options class. A short usage sketch
* follows this class.
*/
public class Statistics {
private final long statsHandle_;
public Statistics(long statsHandle) {
statsHandle_ = statsHandle;
}
public long getTickerCount(TickerType tickerType) {
assert(isInitialized());
return getTickerCount0(tickerType.getValue(), statsHandle_);
}
public HistogramData geHistogramData(HistogramType histogramType) {
assert(isInitialized());
HistogramData hist = geHistogramData0(
histogramType.getValue(), statsHandle_);
return hist;
}
private boolean isInitialized() {
return (statsHandle_ != 0);
}
private native long getTickerCount0(int tickerType, long handle);
private native HistogramData geHistogramData0(int histogramType, long handle);
}
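A short end-to-end sketch of the wrapper (the class name and the /tmp path are assumptions, not part of this commit):

import org.rocksdb.HistogramData;
import org.rocksdb.HistogramType;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

public class StatisticsSketch {
  public static void main(String[] args) throws RocksDBException {
    Options options = new Options()
        .setCreateIfMissing(true)
        .createStatistics();
    Statistics stats = options.statisticsPtr();
    RocksDB db = RocksDB.open(options, "/tmp/statistics-sketch");
    try {
      db.put("key".getBytes(), "value".getBytes());
      db.get("key".getBytes());

      // Tickers accumulate counts across all operations on this Options instance.
      System.out.println("keys written: "
          + stats.getTickerCount(TickerType.NUMBER_KEYS_WRITTEN));

      // Histograms expose latency distributions, e.g. for Get().
      HistogramData getLatency = stats.geHistogramData(HistogramType.DB_GET);
      System.out.println("DB_GET median micros: " + getLatency.getMedian());
    } finally {
      db.close();
      options.dispose();
    }
  }
}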
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
package org.rocksdb;
public enum TickerType {
// total block cache misses
// REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
// BLOCK_CACHE_FILTER_MISS +
// BLOCK_CACHE_DATA_MISS;
BLOCK_CACHE_MISS(0),
// total block cache hit
// REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
// BLOCK_CACHE_FILTER_HIT +
// BLOCK_CACHE_DATA_HIT;
BLOCK_CACHE_HIT(1),
// # of blocks added to block cache.
BLOCK_CACHE_ADD(2),
// # of times cache miss when accessing index block from block cache.
BLOCK_CACHE_INDEX_MISS(3),
// # of times cache hit when accessing index block from block cache.
BLOCK_CACHE_INDEX_HIT(4),
// # of times cache miss when accessing filter block from block cache.
BLOCK_CACHE_FILTER_MISS(5),
// # of times cache hit when accessing filter block from block cache.
BLOCK_CACHE_FILTER_HIT(6),
// # of times cache miss when accessing data block from block cache.
BLOCK_CACHE_DATA_MISS(7),
// # of times cache hit when accessing data block from block cache.
BLOCK_CACHE_DATA_HIT(8),
// # of times bloom filter has avoided file reads.
BLOOM_FILTER_USEFUL(9),
// # of memtable hits.
MEMTABLE_HIT(10),
// # of memtable misses.
MEMTABLE_MISS(11),
/**
* COMPACTION_KEY_DROP_* count the reasons for key drop during compaction.
* There are 3 reasons currently.
*/
COMPACTION_KEY_DROP_NEWER_ENTRY(12), // key was written with a newer value.
COMPACTION_KEY_DROP_OBSOLETE(13), // The key is obsolete.
COMPACTION_KEY_DROP_USER(14), // user compaction function has dropped the key.
// Number of keys written to the database via the Put and Write calls.
NUMBER_KEYS_WRITTEN(15),
// Number of keys read.
NUMBER_KEYS_READ(16),
// Number keys updated, if inplace update is enabled
NUMBER_KEYS_UPDATED(17),
// Bytes written / read
BYTES_WRITTEN(18),
BYTES_READ(19),
NO_FILE_CLOSES(20),
NO_FILE_OPENS(21),
NO_FILE_ERRORS(22),
// Time system had to wait to do L0-L1 compactions
STALL_L0_SLOWDOWN_MICROS(23),
// Time system had to wait to move memtable to L1.
STALL_MEMTABLE_COMPACTION_MICROS(24),
// write throttle because of too many files in L0
STALL_L0_NUM_FILES_MICROS(25),
RATE_LIMIT_DELAY_MILLIS(26),
NO_ITERATORS(27), // number of iterators currently open
// Number of MultiGet calls, keys read, and bytes read
NUMBER_MULTIGET_CALLS(28),
NUMBER_MULTIGET_KEYS_READ(29),
NUMBER_MULTIGET_BYTES_READ(30),
// Number of delete records that were not required to be
// written to storage because key does not exist
NUMBER_FILTERED_DELETES(31),
NUMBER_MERGE_FAILURES(32),
SEQUENCE_NUMBER(33),
// number of times bloom was checked before creating iterator on a
// file, and the number of times the check was useful in avoiding
// iterator creation (and thus likely IOPs).
BLOOM_FILTER_PREFIX_CHECKED(34),
BLOOM_FILTER_PREFIX_USEFUL(35),
// Number of times we had to reseek inside an iteration to skip
// over large number of keys with same userkey.
NUMBER_OF_RESEEKS_IN_ITERATION(36),
// Record the number of calls to GetUpdatesSince. Useful to keep track of
// transaction log iterator refreshes
GET_UPDATES_SINCE_CALLS(37),
BLOCK_CACHE_COMPRESSED_MISS(38), // miss in the compressed block cache
BLOCK_CACHE_COMPRESSED_HIT(39), // hit in the compressed block cache
WAL_FILE_SYNCED(40), // Number of times WAL sync is done
WAL_FILE_BYTES(41), // Number of bytes written to WAL
// Writes can be processed by requesting thread or by the thread at the
// head of the writers queue.
WRITE_DONE_BY_SELF(42),
WRITE_DONE_BY_OTHER(43),
WRITE_WITH_WAL(44), // Number of Write calls that request WAL
COMPACT_READ_BYTES(45), // Bytes read during compaction
COMPACT_WRITE_BYTES(46), // Bytes written during compaction
// Number of table's properties loaded directly from file, without creating
// table reader object.
NUMBER_DIRECT_LOAD_TABLE_PROPERTIES(47),
NUMBER_SUPERVERSION_ACQUIRES(48),
NUMBER_SUPERVERSION_RELEASES(49),
NUMBER_SUPERVERSION_CLEANUPS(50);
private final int value_;
private TickerType(int value) {
value_ = value;
}
public int getValue() {
return value_;
}
}
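Ticker values can also be combined into derived metrics. A hedged sketch (hypothetical helper, not part of this commit) computing the block cache hit ratio from the BLOCK_CACHE_HIT/MISS totals defined at the top of this enum:

import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

public class CacheHitRatio {
  // Returns the block cache hit ratio in [0, 1], or 0 when there has been no
  // cache activity yet. The two totals already include index, filter and data
  // blocks, per the REQUIRES comments above.
  public static double blockCacheHitRatio(Statistics stats) {
    long hits = stats.getTickerCount(TickerType.BLOCK_CACHE_HIT);
    long misses = stats.getTickerCount(TickerType.BLOCK_CACHE_MISS);
    long total = hits + misses;
    return total == 0 ? 0.0 : (double) hits / total;
  }
}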
@@ -5,7 +5,6 @@
package org.rocksdb;
import java.lang.*;
import java.util.*;
/**
@@ -118,4 +117,3 @@ class WriteBatchInternal {
static native long sequence(WriteBatch batch);
static native void append(WriteBatch b1, WriteBatch b2);
}
@@ -9,7 +9,6 @@
package org.rocksdb;
import java.util.*;
import java.lang.*;
import java.io.UnsupportedEncodingException;
/**
......
@@ -9,12 +9,14 @@
#include <stdlib.h>
#include <jni.h>
#include <string>
#include <memory>
#include "include/org_rocksdb_Options.h"
#include "include/org_rocksdb_WriteOptions.h"
#include "rocksjni/portal.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
/*
* Class: org_rocksdb_Options
@@ -35,7 +37,7 @@ void Java_org_rocksdb_Options_dispose0(JNIEnv* env, jobject jobj) {
rocksdb::Options* op = rocksdb::OptionsJni::getHandle(env, jobj);
delete op;
rocksdb::OptionsJni::setHandle(env, jobj, op);
rocksdb::OptionsJni::setHandle(env, jobj, nullptr);
}
/*
@@ -91,6 +93,27 @@ void Java_org_rocksdb_Options_setMaxWriteBufferNumber(
jmax_write_buffer_number;
}
/*
* Class: org_rocksdb_Options
* Method: createStatistics
* Signature: (J)V
*/
void Java_org_rocksdb_Options_createStatistics(
JNIEnv* env, jobject jobj, jlong jOptHandle) {
reinterpret_cast<rocksdb::Options*>(jOptHandle)->statistics =
rocksdb::CreateDBStatistics();
}
/*
* Class: org_rocksdb_Options
* Method: statisticsPtr
* Signature: (J)J
*/
jlong Java_org_rocksdb_Options_statisticsPtr(
JNIEnv* env, jobject jobj, jlong jOptHandle) {
auto st = reinterpret_cast<rocksdb::Options*>(jOptHandle)->statistics.get();
return reinterpret_cast<jlong>(st);
}
/*
* Class: org_rocksdb_Options
@@ -830,5 +853,3 @@ jboolean Java_org_rocksdb_WriteOptions_disableWAL(
JNIEnv* env, jobject jwrite_options, jlong jhandle) {
return reinterpret_cast<rocksdb::WriteOptions*>(jhandle)->disableWAL;
}
@@ -172,6 +172,15 @@ class WriteBatchJni {
}
};
class HistogramDataJni {
public:
static jmethodID getConstructorMethodId(JNIEnv* env, jclass jclazz) {
static jmethodID mid = env->GetMethodID(
jclazz, "<init>", "(DDDDD)V");
assert(mid != nullptr);
return mid;
}
};
class BackupableDBOptionsJni {
public:
// Get the java class id of org.rocksdb.BackupableDBOptions.
......
// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// This file implements the "bridge" between Java and C++ and enables
// calling C++ rocksdb::Statistics methods from the Java side.
#include <stdio.h>
#include <stdlib.h>
#include <jni.h>
#include "include/org_rocksdb_Statistics.h"
#include "rocksjni/portal.h"
#include "rocksdb/statistics.h"
#include <iostream>
/*
* Class: org_rocksdb_Statistics
* Method: getTickerCount0
* Signature: (IJ)J
*/
jlong Java_org_rocksdb_Statistics_getTickerCount0(
JNIEnv* env, jobject jobj, int tickerType, jlong handle) {
auto st = reinterpret_cast<rocksdb::Statistics*>(handle);
assert(st != nullptr);
return st->getTickerCount(static_cast<rocksdb::Tickers>(tickerType));
}
/*
* Class: org_rocksdb_Statistics
* Method: geHistogramData0
* Signature: (IJ)Lorg/rocksdb/HistogramData;
*/
jobject Java_org_rocksdb_Statistics_geHistogramData0(
JNIEnv* env, jobject jobj, int histogramType, jlong handle) {
auto st = reinterpret_cast<rocksdb::Statistics*>(handle);
assert(st != nullptr);
rocksdb::HistogramData data;
st->histogramData(static_cast<rocksdb::Histograms>(histogramType),
&data);
// Don't reuse class pointer
jclass jclazz = env->FindClass("org/rocksdb/HistogramData");
jmethodID mid = rocksdb::HistogramDataJni::getConstructorMethodId(
env, jclazz);
return env->NewObject(jclazz, mid, data.median, data.percentile95,
data.percentile99, data.average, data.standard_deviation);
}
@@ -262,4 +262,3 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
return jstate;
}