Improved metrics (#240)

Added db metrics (kvdb_bytes_read, kvdb_bytes_written, kvdb_reads, kvdb_writes).
Added --metrics-prefix=[prefix], which prepends the given prefix to the names of all exported metrics.
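
A minimal sketch of how the prefix is applied, based on the PrometheusRegistry added to util/stats in this commit (the prefix value is illustrative):

    use stats::PrometheusRegistry;

    fn main() {
        // Value of --metrics-prefix; "oe_" is only an example.
        let mut reg = PrometheusRegistry::new("oe_".to_string());
        // Exported as "oe_kvdb_reads": the prefix is prepended verbatim,
        // so any separator (e.g. the underscore) must be part of the prefix.
        reg.register_counter("kvdb_reads", "db reads", 42);
    }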
Author: adria0.eth, 2021-03-03 22:44:35 +01:00 (committed by GitHub)
Parent: ba011eba15
Commit: 0fcb102f03
34 changed files with 440 additions and 189 deletions

Cargo.lock (generated)

@@ -1073,6 +1073,7 @@ dependencies = [
  "rlp_compress",
  "rlp_derive",
  "rustc-hex 1.0.0",
+ "stats",
  "tempdir",
  "triehash-ethereum",
 ]
@@ -1123,9 +1124,12 @@ dependencies = [
  "ethereum-types 0.4.2",
  "heapsize",
  "kvdb",
+ "kvdb-memorydb",
+ "kvdb-rocksdb",
  "parking_lot 0.7.1",
  "rlp 0.3.0",
  "rlp_derive",
+ "stats",
 ]

 [[package]]
@@ -2154,6 +2158,7 @@ name = "journaldb"
 version = "0.2.0"
 dependencies = [
  "env_logger",
+ "ethcore-db",
  "ethereum-types 0.4.2",
  "fastmap",
  "hash-db",
@@ -2986,6 +2991,7 @@ name = "parity-local-store"
 version = "0.1.0"
 dependencies = [
  "common-types",
+ "ethcore-db",
  "ethcore-io",
  "ethkey",
  "kvdb",


@@ -475,6 +475,10 @@ usage! {
     "--metrics",
     "Enable prometheus metrics (only full client).",

+    ARG arg_metrics_prefix: (String) = "", or |c: &Config| c.metrics.as_ref()?.prefix.clone(),
+    "--metrics-prefix=[prefix]",
+    "Prepend the specified prefix to the exported metrics names.",
+
     ARG arg_metrics_port: (u16) = 3000u16, or |c: &Config| c.metrics.as_ref()?.port.clone(),
     "--metrics-port=[PORT]",
     "Specify the port portion of the metrics server.",
@@ -922,6 +926,7 @@ struct Ipc {
 #[serde(deny_unknown_fields)]
 struct Metrics {
     enable: Option<bool>,
+    prefix: Option<String>,
     port: Option<u16>,
     interface: Option<String>,
 }
@@ -1338,6 +1343,7 @@ mod tests {
             // METRICS
             flag_metrics: false,
+            arg_metrics_prefix: "".into(),
             arg_metrics_port: 3000u16,
             arg_metrics_interface: "local".into(),
@@ -1542,6 +1548,7 @@ mod tests {
             }),
             metrics: Some(Metrics {
                 enable: Some(true),
+                prefix: Some("oe".to_string()),
                 interface: Some("local".to_string()),
                 port: Some(4000),
             }),


@@ -36,7 +36,7 @@ apis = ["rpc", "eth"]
 enable = true
 interface = "local"
 port = 4000
+prefix = "oe"
 [secretstore]
 http_port = 8082


@@ -958,6 +958,7 @@ impl Configuration {
     fn metrics_config(&self) -> Result<MetricsConfiguration, String> {
         let conf = MetricsConfiguration {
             enabled: self.metrics_enabled(),
+            prefix: self.metrics_prefix(),
             interface: self.metrics_interface(),
             port: self.args.arg_ports_shift + self.args.arg_metrics_port,
         };
@@ -1147,6 +1148,10 @@ impl Configuration {
         self.args.flag_metrics
     }

+    fn metrics_prefix(&self) -> String {
+        self.args.arg_metrics_prefix.clone()
+    }
+
     fn secretstore_enabled(&self) -> bool {
         !self.args.flag_no_secretstore && cfg!(feature = "secretstore")
     }


@@ -24,7 +24,8 @@ use self::{
 };
 use blooms_db;
 use ethcore::client::ClientConfig;
-use kvdb::KeyValueDB;
+use ethcore_db::KeyValueDB;
+use stats::PrometheusMetrics;
 use std::{fs, io, path::Path, sync::Arc};

 mod blooms;
@@ -53,6 +54,10 @@ impl BlockChainDB for AppDB {
     }
 }

+impl PrometheusMetrics for AppDB {
+    fn prometheus_metrics(&self, _: &mut stats::PrometheusRegistry) {}
+}
+
 /// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path.
 #[cfg(feature = "secretstore")]
 pub fn open_secretstore_db(data_path: &str) -> Result<Arc<dyn KeyValueDB>, String> {
@@ -101,8 +106,11 @@ pub fn open_database(
     fs::create_dir_all(&blooms_path)?;
     fs::create_dir_all(&trace_blooms_path)?;

+    let db = Database::open(&config, client_path)?;
+    let db_with_metrics = ethcore_db::DatabaseWithMetrics::new(db);
+
     let db = AppDB {
-        key_value: Arc::new(Database::open(&config, client_path)?),
+        key_value: Arc::new(db_with_metrics),
         blooms: blooms_db::Database::open(blooms_path)?,
         trace_blooms: blooms_db::Database::open(trace_blooms_path)?,
     };


@@ -8,13 +8,15 @@ use hyper::{service::service_fn_ok, Body, Method, Request, Response, Server, Sta
 use stats::{
     prometheus::{self, Encoder},
-    prometheus_gauge, PrometheusMetrics,
+    PrometheusMetrics, PrometheusRegistry,
 };

 #[derive(Debug, Clone, PartialEq)]
 pub struct MetricsConfiguration {
     /// Are metrics enabled (default is false)?
     pub enabled: bool,
+    /// Prefix
+    pub prefix: String,
     /// The IP of the network interface used (default is 127.0.0.1).
     pub interface: String,
     /// The network port (default is 3000).
@@ -25,6 +27,7 @@ impl Default for MetricsConfiguration {
     fn default() -> Self {
         MetricsConfiguration {
             enabled: false,
+            prefix: "".into(),
             interface: "127.0.0.1".into(),
             port: 3000,
         }
@@ -35,19 +38,22 @@ struct State {
     rpc_apis: Arc<rpc_apis::FullDependencies>,
 }

-fn handle_request(req: Request<Body>, state: Arc<Mutex<State>>) -> Response<Body> {
+fn handle_request(
+    req: Request<Body>,
+    conf: Arc<MetricsConfiguration>,
+    state: Arc<Mutex<State>>,
+) -> Response<Body> {
     let (parts, _body) = req.into_parts();
     match (parts.method, parts.uri.path()) {
         (Method::GET, "/metrics") => {
             let start = Instant::now();

-            let mut reg = prometheus::Registry::new();
+            let mut reg = PrometheusRegistry::new(conf.prefix.clone());
             let state = state.lock();
             state.rpc_apis.client.prometheus_metrics(&mut reg);
             state.rpc_apis.sync.prometheus_metrics(&mut reg);
             let elapsed = start.elapsed();
-            prometheus_gauge(
-                &mut reg,
+            reg.register_gauge(
                 "metrics_time",
                 "Time to perform rpc metrics",
                 elapsed.as_millis() as i64,
@@ -55,7 +61,7 @@ fn handle_request(req: Request<Body>, state: Arc<Mutex<State>>) -> Response<Body
             let mut buffer = vec![];
             let encoder = prometheus::TextEncoder::new();
-            let metric_families = reg.gather();
+            let metric_families = reg.registry().gather();

             encoder
                 .encode(&metric_families, &mut buffer)
@@ -90,17 +96,20 @@ pub fn start_prometheus_metrics(
         rpc_apis: deps.apis.clone(),
     };
     let state = Arc::new(Mutex::new(state));
+    let conf = Arc::new(conf.to_owned());

     let server = Server::bind(&addr)
         .serve(move || {
             // This is the `Service` that will handle the connection.
             // `service_fn_ok` is a helper to convert a function that
             // returns a Response into a `Service`.
             let state = state.clone();
-            service_fn_ok(move |req: Request<Body>| handle_request(req, state.clone()))
+            let conf = conf.clone();
+            service_fn_ok(move |req: Request<Body>| {
+                handle_request(req, conf.clone(), state.clone())
+            })
         })
         .map_err(|e| eprintln!("server error: {}", e));

-    println!("Listening on http://{}", addr);
+    info!("Started prometeus metrics at http://{}/metrics", addr);

     deps.executor.spawn(server);
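
The handler change in essence: every GET /metrics builds a fresh registry carrying the configured prefix, lets each collector contribute, and text-encodes the result. A minimal sketch (the free function and collector slice are hypothetical; the calls mirror the diff above):

    use stats::{
        prometheus::{self, Encoder},
        PrometheusMetrics, PrometheusRegistry,
    };

    fn render_metrics(prefix: &str, collectors: &[&dyn PrometheusMetrics]) -> Vec<u8> {
        // A fresh registry per scrape, so values are point-in-time snapshots.
        let mut reg = PrometheusRegistry::new(prefix.to_string());
        for c in collectors {
            c.prometheus_metrics(&mut reg);
        }
        // Encode in the Prometheus text exposition format.
        let mut buffer = vec![];
        let encoder = prometheus::TextEncoder::new();
        encoder
            .encode(&reg.registry().gather(), &mut buffer)
            .expect("encoding gathered metrics to text should not fail");
        buffer
    }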


@@ -7,6 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 common-types = { path = "../../../ethcore/types" }
 ethcore-io = { path = "../../../runtime/io" }
+ethcore-db = { path = "../../../db/db"}
 kvdb = "0.1"
 log = "0.4"
 rlp = { version = "0.3.0", features = ["ethereum"] }


@@ -18,14 +18,15 @@
 use std::{fmt, sync::Arc, time::Duration};

+use ethcore_db::KeyValueDB;
 use io::IoHandler;
-use kvdb::KeyValueDB;
 use types::transaction::{
     Condition as TransactionCondition, PendingTransaction, SignedTransaction, TypedTransaction,
     UnverifiedTransaction,
 };

 extern crate common_types as types;
+extern crate ethcore_db;
 extern crate ethcore_io as io;
 extern crate kvdb;
 extern crate rlp;
@@ -253,7 +254,7 @@ mod tests {
     #[test]
     fn twice_empty() {
-        let db = Arc::new(::kvdb_memorydb::create(0));
+        let db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));

         {
             let store = super::create(db.clone(), None, Dummy(vec![]));
@@ -284,7 +285,7 @@ mod tests {
             })
             .collect();

-        let db = Arc::new(::kvdb_memorydb::create(0));
+        let db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));

         {
             // nothing written yet, will write pending.
@@ -325,7 +326,7 @@ mod tests {
             PendingTransaction::new(signed, None)
         });

-        let db = Arc::new(::kvdb_memorydb::create(0));
+        let db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));

         {
             // nothing written, will write bad.
             let store = super::create(db.clone(), None, Dummy(transactions.clone()));


@@ -12,6 +12,9 @@ common-types = { path = "../../ethcore/types" }
 ethereum-types = "0.4"
 heapsize = "0.4"
 kvdb = "0.1"
+kvdb-rocksdb = "0.1.3"
+kvdb-memorydb = "0.1"
 parking_lot = "0.7"
 rlp = { version = "0.3.0", features = ["ethereum"] }
 rlp_derive = { path = "../../util/rlp-derive" }
+stats = { path = "../../util/stats" }


@@ -16,9 +16,11 @@
 //! Database utilities and definitions.

-use kvdb::{DBTransaction, KeyValueDB};
+use kvdb::DBTransaction;
+use kvdb_rocksdb::Database;
 use parking_lot::RwLock;
-use std::{collections::HashMap, hash::Hash, ops::Deref};
+use stats::{PrometheusMetrics, PrometheusRegistry};
+use std::{collections::HashMap, hash::Hash, io::Read, ops::Deref};

 use rlp;
@@ -282,7 +284,7 @@ impl Writable for DBTransaction {
     }
 }

-impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
+impl<KVDB: kvdb::KeyValueDB + ?Sized> Readable for KVDB {
     fn read<T, R>(&self, col: Option<u32>, key: &dyn Key<T, Target = R>) -> Option<T>
     where
         T: rlp::Decodable,
@@ -311,3 +313,197 @@ impl<KVDB: KeyValueDB + ?Sized> Readable for KVDB {
         }
     }
 }
+
+/// Database with enabled statistics
+pub struct DatabaseWithMetrics {
+    db: Database,
+    reads: std::sync::atomic::AtomicI64,
+    writes: std::sync::atomic::AtomicI64,
+    bytes_read: std::sync::atomic::AtomicI64,
+    bytes_written: std::sync::atomic::AtomicI64,
+}
+
+impl DatabaseWithMetrics {
+    /// Create a new instance
+    pub fn new(db: Database) -> Self {
+        Self {
+            db,
+            reads: std::sync::atomic::AtomicI64::new(0),
+            writes: std::sync::atomic::AtomicI64::new(0),
+            bytes_read: std::sync::atomic::AtomicI64::new(0),
+            bytes_written: std::sync::atomic::AtomicI64::new(0),
+        }
+    }
+}
+
+/// Ethcore definition of a KeyValueDB with embeeded metrics
+pub trait KeyValueDB: kvdb::KeyValueDB + PrometheusMetrics {}
+
+impl kvdb::KeyValueDB for DatabaseWithMetrics {
+    fn get(&self, col: Option<u32>, key: &[u8]) -> std::io::Result<Option<kvdb::DBValue>> {
+        let res = self.db.get(col, key);
+        let count = res
+            .as_ref()
+            .map_or(0, |y| y.as_ref().map_or(0, |x| x.bytes().count()));
+
+        self.reads
+            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+        self.bytes_read
+            .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed);
+
+        res
+    }
+    fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
+        let res = self.db.get_by_prefix(col, prefix);
+        let count = res.as_ref().map_or(0, |x| x.bytes().count());
+
+        self.reads
+            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+        self.bytes_read
+            .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed);
+
+        res
+    }
+    fn write_buffered(&self, transaction: DBTransaction) {
+        let mut count = 0;
+        for op in &transaction.ops {
+            count += match op {
+                kvdb::DBOp::Insert { value, .. } => value.bytes().count(),
+                _ => 0,
+            };
+        }
+
+        self.writes.fetch_add(
+            transaction.ops.len() as i64,
+            std::sync::atomic::Ordering::Relaxed,
+        );
+        self.bytes_written
+            .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed);
+
+        self.db.write_buffered(transaction)
+    }
+    fn write(&self, transaction: DBTransaction) -> std::io::Result<()> {
+        let mut count = 0;
+        for op in &transaction.ops {
+            count += match op {
+                kvdb::DBOp::Insert { value, .. } => value.bytes().count(),
+                _ => 0,
+            };
+        }

+        self.bytes_written
+            .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed);
+        self.writes.fetch_add(
+            transaction.ops.len() as i64,
+            std::sync::atomic::Ordering::Relaxed,
+        );
+
+        self.db.write(transaction)
+    }
+    fn flush(&self) -> std::io::Result<()> {
+        self.db.flush()
+    }
+
+    fn iter<'a>(
+        &'a self,
+        col: Option<u32>,
+    ) -> Box<(dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a)> {
+        kvdb::KeyValueDB::iter(&self.db, col)
+    }
+
+    fn iter_from_prefix<'a>(
+        &'a self,
+        col: Option<u32>,
+        prefix: &'a [u8],
+    ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
+        self.db.iter_from_prefix(col, prefix)
+    }
+
+    fn restore(&self, new_db: &str) -> std::io::Result<()> {
+        self.db.restore(new_db)
+    }
+}
+
+impl KeyValueDB for DatabaseWithMetrics {}
+
+impl PrometheusMetrics for DatabaseWithMetrics {
+    fn prometheus_metrics(&self, p: &mut PrometheusRegistry) {
+        p.register_counter(
+            "kvdb_reads",
+            "db reads",
+            self.reads.load(std::sync::atomic::Ordering::Relaxed) as i64,
+        );
+        p.register_counter(
+            "kvdb_writes",
+            "db writes",
+            self.writes.load(std::sync::atomic::Ordering::Relaxed) as i64,
+        );
+        p.register_counter(
+            "kvdb_bytes_read",
+            "db bytes_reads",
+            self.bytes_read.load(std::sync::atomic::Ordering::Relaxed) as i64,
+        );
+        p.register_counter(
+            "kvdb_bytes_written",
+            "db bytes_written",
+            self.bytes_written
+                .load(std::sync::atomic::Ordering::Relaxed) as i64,
+        );
+    }
+}
+
+/// InMemory with disabled statistics
+pub struct InMemoryWithMetrics {
+    db: kvdb_memorydb::InMemory,
+}
+
+impl kvdb::KeyValueDB for InMemoryWithMetrics {
+    fn get(&self, col: Option<u32>, key: &[u8]) -> std::io::Result<Option<kvdb::DBValue>> {
+        self.db.get(col, key)
+    }
+    fn get_by_prefix(&self, col: Option<u32>, prefix: &[u8]) -> Option<Box<[u8]>> {
+        self.db.get_by_prefix(col, prefix)
+    }
+    fn write_buffered(&self, transaction: DBTransaction) {
+        self.db.write_buffered(transaction)
+    }
+    fn write(&self, transaction: DBTransaction) -> std::io::Result<()> {
+        self.db.write(transaction)
+    }
+    fn flush(&self) -> std::io::Result<()> {
+        self.db.flush()
+    }
+
+    fn iter<'a>(
+        &'a self,
+        col: Option<u32>,
+    ) -> Box<(dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a)> {
+        kvdb::KeyValueDB::iter(&self.db, col)
+    }
+
+    fn iter_from_prefix<'a>(
+        &'a self,
+        col: Option<u32>,
+        prefix: &'a [u8],
+    ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
+        self.db.iter_from_prefix(col, prefix)
+    }
+
+    fn restore(&self, new_db: &str) -> std::io::Result<()> {
+        self.db.restore(new_db)
+    }
+}
+
+impl PrometheusMetrics for InMemoryWithMetrics {
+    fn prometheus_metrics(&self, _: &mut PrometheusRegistry) {}
+}
+
+impl KeyValueDB for InMemoryWithMetrics {}
+
+impl InMemoryWithMetrics {
+    /// Create new instance
+    pub fn create(num_cols: u32) -> Self {
+        Self {
+            db: kvdb_memorydb::create(num_cols),
+        }
+    }
+}
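
A hedged usage sketch of the wrappers defined above, assuming the kvdb 0.1 transaction API; InMemoryWithMetrics is intended for tests, where it satisfies the new trait bound while reporting nothing:

    use std::sync::Arc;

    use kvdb::KeyValueDB as _; // bring the supertrait's methods into scope

    fn demo() -> std::io::Result<()> {
        // One column, in memory; prometheus_metrics() is a no-op here.
        let db: Arc<dyn ethcore_db::KeyValueDB> =
            Arc::new(ethcore_db::InMemoryWithMetrics::create(1));
        let mut tx = kvdb::DBTransaction::new();
        tx.put(Some(0), b"key", b"value");
        db.write(tx)?;
        assert!(db.get(Some(0), b"key")?.is_some());
        Ok(())
    }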


@@ -22,5 +22,6 @@ mod db;
 pub mod cache_manager;
 pub mod keys;

+pub use kvdb::{DBTransaction, DBValue};
 pub use self::db::*;


@@ -11,6 +11,7 @@ ethereum-types = "0.4"
 hash-db = "0.11.0"
 heapsize = "0.4"
 keccak-hasher = { path = "../../util/keccak-hasher" }
+ethcore-db = { path = "../../db/db"}
 kvdb = "0.1"
 log = "0.4"
 memory-db = { path = "../memory-db" }


@@ -25,10 +25,10 @@ use std::{
 use super::{
     error_key_already_exists, error_negatively_reference_hash, memory_db::*, LATEST_ERA_KEY,
 };
+use ethcore_db::{DBTransaction, DBValue, KeyValueDB};
 use ethereum_types::H256;
 use hash_db::HashDB;
 use keccak_hasher::KeccakHasher;
-use kvdb::{DBTransaction, DBValue, KeyValueDB};
 use rlp::{decode, encode};
 use traits::JournalDB;
@@ -222,13 +222,12 @@ mod tests {
     use super::*;
     use hash_db::HashDB;
     use keccak::keccak;
-    use kvdb_memorydb;
     use JournalDB;

     #[test]
     fn insert_same_in_fork() {
         // history is 1
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);

         let x = jdb.insert(b"X");
         jdb.commit_batch(1, &keccak(b"1"), None).unwrap();
@@ -256,7 +255,7 @@ mod tests {
     #[test]
     fn long_history() {
         // history is 3
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);
         let h = jdb.insert(b"foo");
         jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
         assert!(jdb.contains(&h));
@@ -276,7 +275,7 @@ mod tests {
     #[test]
     #[should_panic]
     fn multiple_owed_removal_not_allowed() {
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);
         let h = jdb.insert(b"foo");
         jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
         assert!(jdb.contains(&h));
@@ -290,7 +289,7 @@ mod tests {
     #[test]
     fn complex() {
         // history is 1
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);

         let foo = jdb.insert(b"foo");
         let bar = jdb.insert(b"bar");
@@ -326,7 +325,7 @@ mod tests {
     #[test]
     fn fork() {
         // history is 1
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);

         let foo = jdb.insert(b"foo");
         let bar = jdb.insert(b"bar");
@@ -355,7 +354,7 @@ mod tests {
     #[test]
     fn overwrite() {
         // history is 1
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);

         let foo = jdb.insert(b"foo");
         jdb.commit_batch(0, &keccak(b"0"), None).unwrap();
@@ -377,7 +376,7 @@ mod tests {
     #[test]
     fn fork_same_key() {
         // history is 1
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);
         jdb.commit_batch(0, &keccak(b"0"), None).unwrap();

         let foo = jdb.insert(b"foo");
@@ -396,7 +395,7 @@ mod tests {
     #[test]
     fn reopen() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let bar = H256::random();

         let foo = {
@@ -426,7 +425,7 @@ mod tests {
     #[test]
     fn reopen_remove() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));

         let foo = {
             let mut jdb = ArchiveDB::new(shared_db.clone(), None);
@@ -460,7 +459,7 @@ mod tests {
     #[test]
     fn reopen_fork() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let (foo, _, _) = {
             let mut jdb = ArchiveDB::new(shared_db.clone(), None);
             // history is 1
@@ -488,7 +487,7 @@ mod tests {
     #[test]
     fn inject() {
-        let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(0)), None);
+        let mut jdb = ArchiveDB::new(Arc::new(ethcore_db::InMemoryWithMetrics::create(0)), None);
         let key = jdb.insert(b"dog");
         jdb.inject_batch().unwrap();


@@ -26,11 +26,11 @@ use super::{
     error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, LATEST_ERA_KEY,
 };
 use bytes::Bytes;
+use ethcore_db::{DBTransaction, DBValue, KeyValueDB};
 use ethereum_types::H256;
 use hash_db::HashDB;
 use heapsize::HeapSizeOf;
 use keccak_hasher::KeccakHasher;
-use kvdb::{DBTransaction, DBValue, KeyValueDB};
 use memory_db::*;
 use parking_lot::RwLock;
 use rlp::{decode, encode};
@@ -622,7 +622,6 @@ mod tests {
     use super::{super::traits::JournalDB, *};
     use hash_db::HashDB;
     use keccak::keccak;
-    use kvdb_memorydb;

     #[test]
     fn insert_same_in_fork() {
@@ -913,13 +912,13 @@ mod tests {
     }

     fn new_db() -> EarlyMergeDB {
-        let backing = Arc::new(kvdb_memorydb::create(0));
+        let backing = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         EarlyMergeDB::new(backing, None)
     }

     #[test]
     fn reopen() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let bar = H256::random();

         let foo = {
@@ -1105,7 +1104,7 @@ mod tests {
     fn reopen_remove_three() {
         let _ = ::env_logger::try_init();

-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let foo = keccak(b"foo");

         {
@@ -1166,7 +1165,7 @@ mod tests {
     #[test]
     fn reopen_fork() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let (foo, bar, baz) = {
             let mut jdb = EarlyMergeDB::new(shared_db.clone(), None);


@@ -20,6 +20,7 @@ extern crate heapsize;
 #[macro_use]
 extern crate log;

+extern crate ethcore_db;
 extern crate ethereum_types;
 extern crate fastmap;
 extern crate hash_db;
@@ -147,7 +148,7 @@ impl fmt::Display for Algorithm {
 /// Create a new `JournalDB` trait object over a generic key-value database.
 pub fn new(
-    backing: Arc<dyn kvdb::KeyValueDB>,
+    backing: Arc<dyn ethcore_db::KeyValueDB>,
     algorithm: Algorithm,
     col: Option<u32>,
 ) -> Box<dyn JournalDB> {


@@ -23,10 +23,10 @@ use std::{
 };

 use super::error_negatively_reference_hash;
+use ethcore_db::{DBTransaction, DBValue, KeyValueDB};
 use ethereum_types::H256;
 use hash_db::HashDB;
 use keccak_hasher::KeccakHasher;
-use kvdb::{DBTransaction, DBValue, KeyValueDB};
 use memory_db::*;
 use rlp::{decode, encode, Decodable, DecoderError, Encodable, Rlp, RlpStream};
@@ -88,7 +88,7 @@ impl OverlayDB {
     /// Create a new instance of OverlayDB with an anonymous temporary database.
     #[cfg(test)]
     pub fn new_temp() -> OverlayDB {
-        let backing = Arc::new(::kvdb_memorydb::create(0));
+        let backing = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         Self::new(backing, None)
     }


@@ -23,12 +23,12 @@ use std::{
 };

 use super::{error_negatively_reference_hash, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY};
+use ethcore_db::{DBTransaction, DBValue, KeyValueDB};
 use ethereum_types::H256;
 use fastmap::H256FastMap;
 use hash_db::HashDB;
 use heapsize::HeapSizeOf;
 use keccak_hasher::KeccakHasher;
-use kvdb::{DBTransaction, DBValue, KeyValueDB};
 use memory_db::*;
 use parking_lot::RwLock;
 use rlp::{decode, encode, Decodable, DecoderError, Encodable, Rlp, RlpStream};
@@ -554,11 +554,10 @@ mod tests {
     use super::*;
     use hash_db::HashDB;
     use keccak::keccak;
-    use kvdb_memorydb;
     use JournalDB;

     fn new_db() -> OverlayRecentDB {
-        let backing = Arc::new(kvdb_memorydb::create(0));
+        let backing = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         OverlayRecentDB::new(backing, None)
     }
@@ -832,7 +831,7 @@ mod tests {
     #[test]
     fn reopen() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let bar = H256::random();

         let foo = {
@@ -1015,7 +1014,7 @@ mod tests {
     fn reopen_remove_three() {
         let _ = ::env_logger::try_init();

-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let foo = keccak(b"foo");

         {
@@ -1076,7 +1075,7 @@ mod tests {
     #[test]
     fn reopen_fork() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         let (foo, bar, baz) = {
             let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);
@@ -1146,7 +1145,7 @@ mod tests {
     #[test]
     fn earliest_era() {
-        let shared_db = Arc::new(kvdb_memorydb::create(0));
+        let shared_db = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));

         // empty DB
         let mut jdb = OverlayRecentDB::new(shared_db.clone(), None);


@@ -23,11 +23,11 @@ use std::{
 };

 use super::{traits::JournalDB, LATEST_ERA_KEY};
+use ethcore_db::{DBTransaction, DBValue, KeyValueDB};
 use ethereum_types::H256;
 use hash_db::HashDB;
 use heapsize::HeapSizeOf;
 use keccak_hasher::KeccakHasher;
-use kvdb::{DBTransaction, DBValue, KeyValueDB};
 use memory_db::MemoryDB;
 use overlaydb::OverlayDB;
 use rlp::{decode, encode};
@@ -252,11 +252,10 @@ mod tests {
     use super::*;
     use hash_db::HashDB;
     use keccak::keccak;
-    use kvdb_memorydb;
     use JournalDB;

     fn new_db() -> RefCountedDB {
-        let backing = Arc::new(kvdb_memorydb::create(0));
+        let backing = Arc::new(ethcore_db::InMemoryWithMetrics::create(0));
         RefCountedDB::new(backing, None)
     }


@@ -18,10 +18,10 @@
 use std::{io, sync::Arc};

+use ethcore_db::{DBTransaction, DBValue, KeyValueDB};
 use ethereum_types::H256;
 use hash_db::{AsHashDB, HashDB};
 use keccak_hasher::KeccakHasher;
-use kvdb::{self, DBTransaction, DBValue};
 use std::collections::{BTreeMap, HashMap};

 /// expose keys of a hashDB for debugging or tests (slow).
@@ -86,7 +86,7 @@ pub trait JournalDB: KeyedHashDB {
     }

     /// Get backing database.
-    fn backing(&self) -> &Arc<dyn kvdb::KeyValueDB>;
+    fn backing(&self) -> &Arc<dyn KeyValueDB>;

     /// Clear internal strucutres. This should called after changes have been written
     /// to the backing strage


@@ -26,6 +26,7 @@ rlp = { version = "0.3.0", features = ["ethereum"] }
 rlp_compress = { path = "../../util/rlp-compress" }
 rlp_derive = { path = "../../util/rlp-derive" }
 triehash-ethereum = { version = "0.2", path = "../../util/triehash-ethereum" }
+stats = { path = "../../util/stats" }

 [dev-dependencies]
 env_logger = "0.5"


@@ -41,6 +41,7 @@ use common_types::{
     views::{BlockView, HeaderView},
     BlockNumber,
 };
+use db::{DBTransaction, KeyValueDB};
 use ethcore_db::{
     self as db,
     cache_manager::CacheManager,
@@ -50,13 +51,13 @@ use ethcore_db::{
 use ethereum_types::{Bloom, BloomRef, H256, U256};
 use heapsize::HeapSizeOf;
 use itertools::Itertools;
-use kvdb::{DBTransaction, KeyValueDB};
 use log::{info, trace, warn};
 use parity_bytes::Bytes;
 use parking_lot::{Mutex, RwLock};
 use rayon::prelude::*;
 use rlp::RlpStream;
 use rlp_compress::{blocks_swapper, compress, decompress};
+use stats::PrometheusMetrics;

 use crate::{
     best_block::{BestAncientBlock, BestBlock},
@@ -66,7 +67,7 @@ use crate::{
 };

 /// Database backing `BlockChain`.
-pub trait BlockChainDB: Send + Sync {
+pub trait BlockChainDB: Send + Sync + PrometheusMetrics {
     /// Generic key value store.
     fn key_value(&self) -> &Arc<dyn KeyValueDB>;
@@ -1950,6 +1951,9 @@ mod tests {
             &self.trace_blooms
         }
     }
+    impl PrometheusMetrics for TestBlockChainDB {
+        fn prometheus_metrics(&self, _: &mut stats::PrometheusRegistry) {}
+    }

     /// Creates new test instance of `BlockChainDB`
     pub fn new_db() -> Arc<dyn BlockChainDB> {
@@ -1961,7 +1965,9 @@ mod tests {
             trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
             _blooms_dir: blooms_dir,
             _trace_blooms_dir: trace_blooms_dir,
-            key_value: Arc::new(kvdb_memorydb::create(ethcore_db::NUM_COLUMNS.unwrap())),
+            key_value: Arc::new(ethcore_db::InMemoryWithMetrics::create(
+                ethcore_db::NUM_COLUMNS.unwrap(),
+            )),
         };

         Arc::new(db)


@@ -33,12 +33,12 @@ use blockchain::{
 };
 use bytes::{Bytes, ToPretty};
 use call_contract::CallContract;
+use db::{DBTransaction, DBValue, KeyValueDB};
 use error::Error;
 use ethcore_miner::pool::VerifiedTransaction;
 use ethereum_types::{Address, H256, H264, U256};
 use hash::keccak;
 use itertools::Itertools;
-use kvdb::{DBTransaction, DBValue, KeyValueDB};
 use parking_lot::{Mutex, RwLock};
 use rand::OsRng;
 use rlp::{PayloadInfo, Rlp};
@@ -88,7 +88,7 @@ use snapshot::{self, io as snapshot_io, SnapshotClient};
 use spec::Spec;
 use state::{self, State};
 use state_db::StateDB;
-use stats::{prometheus, prometheus_counter, prometheus_gauge, PrometheusMetrics};
+use stats::{PrometheusMetrics, PrometheusRegistry};
 use trace::{
     self, Database as TraceDatabase, ImportRequest as TraceImportRequest, LocalizedTrace, TraceDB,
 };
@@ -3236,41 +3236,36 @@ impl IoChannelQueue {
 }

 impl PrometheusMetrics for Client {
-    fn prometheus_metrics(&self, r: &mut prometheus::Registry) {
+    fn prometheus_metrics(&self, r: &mut PrometheusRegistry) {
         // gas, tx & blocks
         let report = self.report();

         for (key, value) in report.item_sizes.iter() {
-            prometheus_gauge(
-                r,
+            r.register_gauge(
                 &key,
                 format!("Total item number of {}", key).as_str(),
                 *value as i64,
             );
         }

-        prometheus_counter(
-            r,
+        r.register_counter(
            "import_gas",
            "Gas processed",
            report.gas_processed.as_u64() as i64,
        );
-        prometheus_counter(
-            r,
+        r.register_counter(
            "import_blocks",
            "Blocks imported",
            report.blocks_imported as i64,
        );
-        prometheus_counter(
-            r,
+        r.register_counter(
            "import_txs",
            "Transactions applied",
            report.transactions_applied as i64,
        );

         let state_db = self.state_db.read();
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "statedb_cache_size",
            "State DB cache size",
            state_db.cache_size() as i64,
@@ -3278,32 +3273,27 @@ impl PrometheusMetrics for Client {

         // blockchain cache
         let blockchain_cache_info = self.blockchain_cache_info();
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "blockchaincache_block_details",
            "BlockDetails cache size",
            blockchain_cache_info.block_details as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "blockchaincache_block_recipts",
            "Block receipts size",
            blockchain_cache_info.block_receipts as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "blockchaincache_blocks",
            "Blocks cache size",
            blockchain_cache_info.blocks as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "blockchaincache_txaddrs",
            "Transaction addresses cache size",
            blockchain_cache_info.transaction_addresses as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "blockchaincache_size",
            "Total blockchain cache size",
            blockchain_cache_info.total() as i64,
@@ -3321,22 +3311,19 @@ impl PrometheusMetrics for Client {
                 .map(|last| (first, U256::from(last)))
         });
         if let Some((first, last)) = gap {
-            prometheus_gauge(
-                r,
+            r.register_gauge(
                 "chain_warpsync_gap_first",
                 "Warp sync gap, first block",
                 first.as_u64() as i64,
             );
-            prometheus_gauge(
-                r,
+            r.register_gauge(
                 "chain_warpsync_gap_last",
                 "Warp sync gap, last block",
                 last.as_u64() as i64,
             );
         }

-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "chain_block",
            "Best block number",
            chain.best_block_number as i64,
@@ -3344,14 +3331,12 @@ impl PrometheusMetrics for Client {

         // prunning info
         let prunning = self.pruning_info();
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "prunning_earliest_chain",
            "The first block which everything can be served after",
            prunning.earliest_chain as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "prunning_earliest_state",
            "The first block where state requests may be served",
            prunning.earliest_state as i64,
@@ -3359,36 +3344,34 @@ impl PrometheusMetrics for Client {

         // queue info
         let queue = self.queue_info();
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "queue_mem_used",
            "Queue heap memory used in bytes",
            queue.mem_used as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "queue_size_total",
            "The total size of the queues",
            queue.total_queue_size() as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "queue_size_unverified",
            "Number of queued items pending verification",
            queue.unverified_queue_size as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "queue_size_verified",
            "Number of verified queued items pending import",
            queue.verified_queue_size as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "queue_size_verifying",
            "Number of items being verified",
            queue.verifying_queue_size as i64,
        );
+
+        // database info
+        self.db.read().key_value().prometheus_metrics(r);
     }
 }
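
The same pattern generalizes: any component can surface its own numbers by implementing the trait and being invoked from the handler. A hypothetical example (the struct and metric names are not from the commit):

    struct CacheStats {
        hits: i64,
        entries: i64,
    }

    impl stats::PrometheusMetrics for CacheStats {
        fn prometheus_metrics(&self, r: &mut stats::PrometheusRegistry) {
            // Monotonic totals become counters, instantaneous sizes gauges.
            r.register_counter("cache_hits", "Cache hits served", self.hits);
            r.register_gauge("cache_entries", "Entries currently cached", self.entries);
        }
    }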


@@ -25,7 +25,6 @@ use executive;
 use factory::{self, Factories};
 use journaldb;
 use kvdb::{self, KeyValueDB};
-use kvdb_memorydb;
 use pod_state;
 use spec;
 use state;
@@ -181,7 +180,7 @@ impl<'a> EvmTestClient<'a> {
         spec: &'a spec::Spec,
         factories: &Factories,
     ) -> Result<state::State<state_db::StateDB>, EvmTestError> {
-        let db = Arc::new(kvdb_memorydb::create(
+        let db = Arc::new(ethcore_db::InMemoryWithMetrics::create(
             db::NUM_COLUMNS.expect("We use column-based DB; qed"),
         ));
         let journal_db =
@@ -211,7 +210,7 @@ impl<'a> EvmTestClient<'a> {
         factories: &Factories,
         pod_state: pod_state::PodState,
     ) -> Result<state::State<state_db::StateDB>, EvmTestError> {
-        let db = Arc::new(kvdb_memorydb::create(
+        let db = Arc::new(ethcore_db::InMemoryWithMetrics::create(
             db::NUM_COLUMNS.expect("We use column-based DB; qed"),
         ));
         let journal_db =


@@ -34,7 +34,6 @@ use ethtrie;
 use hash::keccak;
 use itertools::Itertools;
 use kvdb::DBValue;
-use kvdb_memorydb;
 use parking_lot::RwLock;
 use rlp::RlpStream;
 use rustc_hex::FromHex;
@@ -75,7 +74,7 @@ use miner::{self, Miner, MinerService};
 use spec::Spec;
 use state::StateInfo;
 use state_db::StateDB;
-use stats::{prometheus, PrometheusMetrics};
+use stats::{PrometheusMetrics, PrometheusRegistry};
 use trace::LocalizedTrace;
 use verification::queue::{kind::blocks::Unverified, QueueInfo};
@@ -409,7 +408,7 @@ impl TestBlockChainClient {

 /// Get temporary db state1
 pub fn get_temp_state_db() -> StateDB {
-    let db = kvdb_memorydb::create(NUM_COLUMNS.unwrap_or(0));
+    let db = ethcore_db::InMemoryWithMetrics::create(NUM_COLUMNS.unwrap_or(0));
     let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE);
     StateDB::new(journal_db, 1024 * 1024)
 }
@@ -1132,5 +1131,5 @@ impl super::traits::EngineClient for TestBlockChainClient {
 }

 impl PrometheusMetrics for TestBlockChainClient {
-    fn prometheus_metrics(&self, _r: &mut prometheus::Registry) {}
+    fn prometheus_metrics(&self, _r: &mut PrometheusRegistry) {}
 }


@@ -73,8 +73,6 @@ extern crate ethcore_accounts as accounts;
 extern crate ethcore_stratum;
 #[cfg(feature = "json-tests")]
 extern crate globset;
-#[cfg(any(test, feature = "test-helpers"))]
-extern crate kvdb_memorydb;
 #[cfg(any(test, feature = "kvdb-rocksdb"))]
 extern crate kvdb_rocksdb;
 #[cfg(test)]


@@ -32,9 +32,9 @@ use snapshot::{Error, ManifestData, Progress};

 use blockchain::{BlockChain, BlockChainDB, BlockProvider};
 use bytes::Bytes;
+use db::KeyValueDB;
 use ethereum_types::{H256, U256};
 use itertools::{Itertools, Position};
-use kvdb::KeyValueDB;
 use rlp::{Rlp, RlpStream};
 use types::{
     encoded, header::Header, ids::BlockId, receipt::TypedReceipt, transaction::TypedTransaction,


@@ -32,9 +32,9 @@ use std::{

 use blockchain::{BlockChain, BlockChainDB, BlockProvider};
 use bytes::Bytes;
+use db::KeyValueDB;
 use engines::EthEngine;
 use ethereum_types::H256;
-use kvdb::KeyValueDB;
 use rand::OsRng;
 use rlp::{Rlp, RlpStream};
 use snapshot::{block::AbridgedBlock, Error, ManifestData, Progress};


@@ -35,12 +35,12 @@ use engines::EthEngine;
 use types::{header::Header, ids::BlockId};

 use bytes::Bytes;
+use db::{DBValue, KeyValueDB};
 use ethereum_types::H256;
 use ethtrie::{TrieDB, TrieDBMut};
 use hash_db::HashDB;
 use journaldb::{self, Algorithm, JournalDB};
 use keccak_hasher::KeccakHasher;
-use kvdb::{DBValue, KeyValueDB};
 use num_cpus;
 use parking_lot::Mutex;
 use rlp::{Rlp, RlpStream};


@@ -82,8 +82,11 @@ fn snap_and_restore() {
     let db_path = tempdir.path().join("db");
     let db = {
-        let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap());
-        let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent);
+        let new_db = Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap();
+        let new_db_with_metrics: Arc<dyn ethcore_db::KeyValueDB> =
+            Arc::new(ethcore_db::DatabaseWithMetrics::new(new_db));
+        let mut rebuilder =
+            StateRebuilder::new(new_db_with_metrics.clone(), Algorithm::OverlayRecent);
         let reader = PackedReader::new(&snap_file).unwrap().unwrap();

         let flag = AtomicBool::new(true);
@@ -98,7 +101,7 @@ fn snap_and_restore() {
         assert_eq!(rebuilder.state_root(), state_root);
         rebuilder.finalize(1000, H256::default()).unwrap();

-        new_db
+        new_db_with_metrics
     };

     let new_db = journaldb::new(db, Algorithm::OverlayRecent, ::db::COL_STATE);
@@ -163,10 +166,11 @@ fn get_code_from_prev_chunk() {
     let tempdir = TempDir::new("").unwrap();
     let db_cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS);

-    let new_db = Arc::new(Database::open(&db_cfg, tempdir.path().to_str().unwrap()).unwrap());
+    let new_db = Database::open(&db_cfg, tempdir.path().to_str().unwrap()).unwrap();
+    let new_db_with_metrics = Arc::new(db::DatabaseWithMetrics::new(new_db));

     {
-        let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent);
+        let mut rebuilder =
+            StateRebuilder::new(new_db_with_metrics.clone(), Algorithm::OverlayRecent);
         let flag = AtomicBool::new(true);

         rebuilder.feed(&chunk1, &flag).unwrap();
@@ -175,7 +179,11 @@ fn get_code_from_prev_chunk() {
         rebuilder.finalize(1000, H256::random()).unwrap();
     }

-    let state_db = journaldb::new(new_db, Algorithm::OverlayRecent, ::db::COL_STATE);
+    let state_db = journaldb::new(
+        new_db_with_metrics,
+        Algorithm::OverlayRecent,
+        ::db::COL_STATE,
+    );
     assert_eq!(state_db.earliest_era(), Some(1000));
 }
@@ -214,8 +222,10 @@ fn checks_flag() {
     let tempdir = TempDir::new("").unwrap();
     let db_path = tempdir.path().join("db");
     {
-        let new_db = Arc::new(Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap());
-        let mut rebuilder = StateRebuilder::new(new_db.clone(), Algorithm::OverlayRecent);
+        let new_db = Database::open(&db_cfg, &db_path.to_string_lossy()).unwrap();
+        let new_db_with_metrics = Arc::new(db::DatabaseWithMetrics::new(new_db));
+        let mut rebuilder =
+            StateRebuilder::new(new_db_with_metrics.clone(), Algorithm::OverlayRecent);
         let reader = PackedReader::new(&snap_file).unwrap().unwrap();

         let flag = AtomicBool::new(false);


@@ -970,7 +970,7 @@ impl Spec {
         let factories = Default::default();
         let mut db = journaldb::new(
-            Arc::new(kvdb_memorydb::create(0)),
+            Arc::new(db::InMemoryWithMetrics::create(0)),
             journaldb::Algorithm::Archive,
             None,
         );


@@ -23,12 +23,12 @@ use blockchain::{
 };
 use blooms_db;
 use bytes::Bytes;
+use db::KeyValueDB;
 use ethereum_types::{Address, H256, U256};
 use ethkey::KeyPair;
 use evm::Factory as EvmFactory;
 use hash::keccak;
 use io::IoChannel;
-use kvdb::KeyValueDB;
 use kvdb_rocksdb::{self, Database, DatabaseConfig};
 use parking_lot::RwLock;
 use rlp::{self, RlpStream};
@@ -350,6 +350,10 @@ impl BlockChainDB for TestBlockChainDB {
     }
 }

+impl stats::PrometheusMetrics for TestBlockChainDB {
+    fn prometheus_metrics(&self, _: &mut stats::PrometheusRegistry) {}
+}
+
 /// Creates new test instance of `BlockChainDB`
 pub fn new_db() -> Arc<dyn BlockChainDB> {
     let blooms_dir = TempDir::new("").unwrap();
@@ -360,7 +364,9 @@ pub fn new_db() -> Arc<dyn BlockChainDB> {
         trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
         _blooms_dir: blooms_dir,
         _trace_blooms_dir: trace_blooms_dir,
-        key_value: Arc::new(::kvdb_memorydb::create(::db::NUM_COLUMNS.unwrap())),
+        key_value: Arc::new(ethcore_db::InMemoryWithMetrics::create(
+            ::db::NUM_COLUMNS.unwrap(),
+        )),
     };

     Arc::new(db)
@@ -374,13 +380,13 @@ pub fn new_temp_db(tempdir: &Path) -> Arc<dyn BlockChainDB> {
     let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
     let key_value_db = Database::open(&db_config, key_value_dir.to_str().unwrap()).unwrap();
+    let key_value_db_with_metrics = ethcore_db::DatabaseWithMetrics::new(key_value_db);

     let db = TestBlockChainDB {
         blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(),
         trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
         _blooms_dir: blooms_dir,
         _trace_blooms_dir: trace_blooms_dir,
-        key_value: Arc::new(key_value_db),
+        key_value: Arc::new(key_value_db_with_metrics),
     };

     Arc::new(db)
@@ -413,13 +419,14 @@ pub fn restoration_db_handler(
             &self.trace_blooms
         }
     }
+
+    impl stats::PrometheusMetrics for RestorationDB {
+        fn prometheus_metrics(&self, _: &mut stats::PrometheusRegistry) {}
+    }

     impl BlockChainDBHandler for RestorationDBHandler {
         fn open(&self, db_path: &Path) -> io::Result<Arc<dyn BlockChainDB>> {
-            let key_value = Arc::new(kvdb_rocksdb::Database::open(
-                &self.config,
-                &db_path.to_string_lossy(),
-            )?);
+            let key_value = kvdb_rocksdb::Database::open(&self.config, &db_path.to_string_lossy())?;
+            let key_value = Arc::new(db::DatabaseWithMetrics::new(key_value));
             let blooms_path = db_path.join("blooms");
             let trace_blooms_path = db_path.join("trace_blooms");
             fs::create_dir_all(&blooms_path)?;


@@ -42,7 +42,7 @@ use ethkey::Secret;
 use io::TimerToken;
 use network::IpFilter;
 use parking_lot::{Mutex, RwLock};
-use stats::{prometheus, prometheus_counter, prometheus_gauge, PrometheusMetrics};
+use stats::{PrometheusMetrics, PrometheusRegistry};

 use std::{
     net::{AddrParseError, SocketAddr},
@@ -323,11 +323,11 @@ impl SyncProvider for EthSync {
 }

 impl PrometheusMetrics for EthSync {
-    fn prometheus_metrics(&self, r: &mut prometheus::Registry) {
+    fn prometheus_metrics(&self, r: &mut PrometheusRegistry) {
         let scalar = |b| if b { 1i64 } else { 0i64 };
         let sync_status = self.status();

-        prometheus_gauge(r,
+        r.register_gauge(
             "sync_status",
             "WaitingPeers(0), SnapshotManifest(1), SnapshotData(2), SnapshotWaiting(3), Blocks(4), Idle(5), Waiting(6), NewBlocks(7)",
             match self.eth_handler.sync.status().state {
@@ -342,59 +342,50 @@ impl PrometheusMetrics for EthSync {
         });

         for (key, value) in sync_status.item_sizes.iter() {
-            prometheus_gauge(
-                r,
+            r.register_gauge(
                 &key,
                 format!("Total item number of {}", key).as_str(),
                 *value as i64,
             );
         }

-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "net_peers",
            "Total number of connected peers",
            sync_status.num_peers as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "net_active_peers",
            "Total number of active peers",
            sync_status.num_active_peers as i64,
        );
-        prometheus_counter(
-            r,
+        r.register_counter(
            "sync_blocks_recieved",
            "Number of blocks downloaded so far",
            sync_status.blocks_received as i64,
        );
-        prometheus_counter(
-            r,
+        r.register_counter(
            "sync_blocks_total",
            "Total number of blocks for the sync process",
            sync_status.blocks_total as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "sync_blocks_highest",
            "Highest block number in the download queue",
            sync_status.highest_block_number.unwrap_or(0) as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "snapshot_download_active",
            "1 if downloading snapshots",
            scalar(sync_status.is_snapshot_syncing()),
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "snapshot_download_chunks",
            "Snapshot chunks",
            sync_status.num_snapshot_chunks as i64,
        );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "snapshot_download_chunks_done",
            "Snapshot chunks downloaded",
            sync_status.snapshot_chunks_done as i64,
@@ -408,8 +399,7 @@ impl PrometheusMetrics for EthSync {
             .manifest_block()
             .unwrap_or((0, H256::zero()));

-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "snapshot_create_block",
            "First block of the current snapshot creation",
            if let CreationStatus::Ongoing { block_number } = creation {
@@ -418,8 +408,7 @@ impl PrometheusMetrics for EthSync {
                 0
             },
         );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "snapshot_restore_block",
            "First block of the current snapshot restoration",
            if let RestorationStatus::Ongoing { block_number, .. } = restoration {
@@ -428,8 +417,7 @@ impl PrometheusMetrics for EthSync {
                 0
             },
         );
-        prometheus_gauge(
-            r,
+        r.register_gauge(
            "snapshot_manifest_block",
            "First block number of the present snapshot",
            manifest_block_num as i64,


@@ -19,7 +19,7 @@
 use ethereum_types::H256;
 use network::client_version::ClientVersion;
 use parking_lot::RwLock;
-use stats::{prometheus, PrometheusMetrics};
+use stats::{PrometheusMetrics, PrometheusRegistry};
 use std::collections::BTreeMap;
 use sync::{EthProtocolInfo, PeerInfo, SyncProvider, SyncState, SyncStatus, TransactionStats};
@@ -69,7 +69,7 @@ impl TestSyncProvider {
 }

 impl PrometheusMetrics for TestSyncProvider {
-    fn prometheus_metrics(&self, _: &mut prometheus::Registry) {}
+    fn prometheus_metrics(&self, _: &mut PrometheusRegistry) {}
 }

 impl SyncProvider for TestSyncProvider {


@@ -26,39 +26,64 @@ use std::{
 extern crate log;
 pub extern crate prometheus;

+pub struct PrometheusRegistry {
+    prefix: String,
+    registry: prometheus::Registry,
+}
+
+impl PrometheusRegistry {
+    /// Create a new instance with the specified prefix
+    pub fn new(prefix: String) -> Self {
+        Self {
+            prefix,
+            registry: prometheus::Registry::new(),
+        }
+    }
+
+    /// Get internal registry
+    pub fn registry(&self) -> &prometheus::Registry {
+        &self.registry
+    }
+
+    /// Adds a new prometheus counter with the specified value
+    pub fn register_counter(&mut self, name: &str, help: &str, value: i64) {
+        let name = format!("{}{}", self.prefix, name);
+        let c = prometheus::IntCounter::new(name.as_str(), help)
+            .expect("name and help must be non-empty");
+        c.inc_by(value);
+        self.registry
+            .register(Box::new(c))
+            .expect("prometheus identifiers must be unique");
+    }
+
+    /// Adds a new prometheus gauge with the specified gauge
+    pub fn register_gauge(&mut self, name: &str, help: &str, value: i64) {
+        let name = format!("{}{}", self.prefix, name);
+        let g = prometheus::IntGauge::new(name.as_str(), help)
+            .expect("name and help must be non-empty");
+        g.set(value);
+        self.registry
+            .register(Box::new(g))
+            .expect("prometheus identifiers must be are unique");
+    }
+
+    /// Adds a new prometheus counter with the time spent in running the specified function
+    pub fn register_optime<F: Fn() -> T, T>(&mut self, name: &str, f: &F) -> T {
+        let start = Instant::now();
+        let t = f();
+        let elapsed = start.elapsed();
+        self.register_gauge(
+            &format!("optime_{}", name),
+            &format!("Time to perform {}", name),
+            elapsed.as_millis() as i64,
+        );
+        t
+    }
+}
+
 /// Implements a prometheus metrics collector
 pub trait PrometheusMetrics {
-    fn prometheus_metrics(&self, registry: &mut prometheus::Registry);
+    fn prometheus_metrics(&self, registry: &mut PrometheusRegistry);
 }
-
-/// Adds a new prometheus counter with the specified value
-pub fn prometheus_counter(reg: &mut prometheus::Registry, name: &str, help: &str, value: i64) {
-    let c = prometheus::IntCounter::new(name, help).expect("name and help must be non-empty");
-    c.inc_by(value);
-    reg.register(Box::new(c))
-        .expect("prometheus identifiers must be unique");
-}
-
-/// Adds a new prometheus gauge with the specified gauge
-pub fn prometheus_gauge(reg: &mut prometheus::Registry, name: &str, help: &str, value: i64) {
-    let g = prometheus::IntGauge::new(name, help).expect("name and help must be non-empty");
-    g.set(value);
-    reg.register(Box::new(g))
-        .expect("prometheus identifiers must be are unique");
-}
-
-/// Adds a new prometheus counter with the time spent in running the specified function
-pub fn prometheus_optime<F: Fn() -> T, T>(r: &mut prometheus::Registry, name: &str, f: &F) -> T {
-    let start = Instant::now();
-    let t = f();
-    let elapsed = start.elapsed();
-    prometheus_gauge(
-        r,
-        &format!("optime_{}", name),
-        &format!("Time to perform {}", name),
-        elapsed.as_millis() as i64,
-    );
-    t
-}

 /// Sorted corpus of data.
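
An end-to-end sketch of the registry introduced above (the prefix and values are illustrative):

    use stats::PrometheusRegistry;

    fn main() {
        let mut reg = PrometheusRegistry::new("oe_".to_string());
        reg.register_gauge("chain_block", "Best block number", 1_234_567);
        // register_optime runs the closure, records its wall time as an
        // "optime_sum" gauge, and passes the closure's result through.
        let sum: u64 = reg.register_optime("sum", &|| (0..1000u64).sum());
        assert_eq!(sum, 499_500);
        // reg.registry().gather() now yields "oe_chain_block" and "oe_optime_sum".
    }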