Unverified Commit 807a2005 authored by Roman Zeyde

Fix some Clippy warnings

parent 182aba84
@@ -40,17 +40,15 @@ fn run_server(config: &Config) -> Result<()> {
     let index = Index::load(&store, &daemon, &metrics, config.index_batch_size)?;
     let store = if is_fully_compacted(&store) {
         store // initial import and full compaction are over
-    } else {
-        if config.jsonrpc_import {
-            index.update(&store, &signal)?; // slower: uses JSONRPC for fetching blocks
-            full_compaction(store)
-        } else {
-            // faster, but uses more memory
-            let store = bulk::index_blk_files(&daemon, config.bulk_index_threads, &metrics, store)?;
-            let store = full_compaction(store);
-            index.reload(&store); // make sure the block header index is up-to-date
-            store
-        }
+    } else if config.jsonrpc_import {
+        index.update(&store, &signal)?; // slower: uses JSONRPC for fetching blocks
+        full_compaction(store)
+    } else {
+        // faster, but uses more memory
+        let store = bulk::index_blk_files(&daemon, config.bulk_index_threads, &metrics, store)?;
+        let store = full_compaction(store);
+        index.reload(&store); // make sure the block header index is up-to-date
+        store
     }
     .enable_compaction(); // enable auto compactions before starting incremental index updates.
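This hunk flattens an `if` nested directly inside an `else` block into an `else if` chain, the rewrite suggested by clippy's collapsible_if family of lints. A minimal standalone sketch of the pattern (function and names are illustrative, not from this commit):

    fn describe(n: i32) -> &'static str {
        // Before: `} else { if n < 0 { ... } else { ... } }` adds a nesting
        // level without changing behavior; `else if` reads flat.
        if n == 0 {
            "zero"
        } else if n < 0 {
            "negative"
        } else {
            "positive"
        }
    }

    fn main() {
        assert_eq!(describe(-5), "negative");
    }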
@@ -77,7 +77,7 @@ impl Parser {
         let blob = fs::read(&path).chain_err(|| format!("failed to read {:?}", path))?;
         timer.observe_duration();
         self.bytes_read.observe(blob.len() as f64);
-        return Ok(blob);
+        Ok(blob)
     }

     fn index_blkfile(&self, blob: Vec<u8>) -> Result<Vec<Row>> {
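`return Ok(blob);` as the last statement of a function trips clippy's needless_return lint: the final expression of a block is already its value, so the idiomatic form drops both `return` and the semicolon. A standalone sketch (hypothetical reader, not the Parser method above):

    use std::fs;
    use std::io;

    fn read_blob(path: &str) -> io::Result<Vec<u8>> {
        let blob = fs::read(path)?;
        // Tail expression: no `return`, no trailing semicolon.
        Ok(blob)
    }

    fn main() {
        // The file may not exist; we only care that this compiles and runs.
        let _ = read_blob("/tmp/does-not-matter.dat");
    }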
@@ -94,7 +94,7 @@ impl Parser {
                 .indexed_blockhashes
                 .lock()
                 .expect("indexed_blockhashes")
-                .insert(blockhash.clone())
+                .insert(blockhash)
             {
                 rows.extend(index_block(&block, header.height()));
                 self.block_count.with_label_values(&["indexed"]).inc();
@@ -134,7 +134,7 @@ fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<Block>> {
         let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?;
         let start = cursor.position() as usize;
         cursor
-            .seek(SeekFrom::Current(block_size as i64))
+            .seek(SeekFrom::Current(i64::from(block_size)))
             .chain_err(|| format!("seek {} failed", block_size))?;
         let end = cursor.position() as usize;
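Replacing `block_size as i64` with `i64::from(block_size)` is what clippy's cast_lossless lint suggests: `From` exists only for conversions that can never truncate or change sign, so the code stops compiling if the types later change in a lossy direction, whereas `as` would silently wrap. Illustrative values:

    fn main() {
        let block_size: u32 = 4_000_000;
        // Widening u32 -> i64 cannot lose information, so From exists.
        let offset = i64::from(block_size);
        assert_eq!(offset, 4_000_000i64);
        // `as` also accepts narrowing casts and silently wraps:
        assert_eq!(4_000_000u32 as u16, 2304);
    }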
@@ -34,9 +34,9 @@ pub struct Config {
 fn str_to_socketaddr(address: &str, what: &str) -> SocketAddr {
     address
         .to_socket_addrs()
-        .expect(&format!("unable to resolve {} address", what))
+        .unwrap_or_else(|e| panic!("unable to resolve {} address: {}", what, e))
         .next()
-        .expect(&format!("no address found for {}", address))
+        .unwrap_or_else(|| panic!("no address found for {}", address))
 }

 impl Config {
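`expect(&format!(...))` formats the panic message eagerly, on every call, even when the value is present. Clippy's expect_fun_call lint suggests `unwrap_or_else` with `panic!`, which builds the message only on the failure path; the same rewrite recurs throughout this commit. A minimal sketch (hypothetical map and key, not from this codebase):

    use std::collections::HashMap;

    fn port_of(services: &HashMap<String, u16>, name: &str) -> u16 {
        // The closure runs only if `get` returns None, so the happy
        // path allocates no message string.
        *services
            .get(name)
            .unwrap_or_else(|| panic!("no port registered for {}", name))
    }

    fn main() {
        let mut services = HashMap::new();
        services.insert("electrum-rpc".to_string(), 50001);
        assert_eq!(port_of(&services, "electrum-rpc"), 50001);
    }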
@@ -177,7 +177,7 @@ impl Config {
         let mut daemon_dir = m
             .value_of("daemon_dir")
-            .map(|p| PathBuf::from(p))
+            .map(PathBuf::from)
             .unwrap_or_else(|| {
                 let mut default_dir = home_dir().expect("no homedir");
                 default_dir.push(".bitcoin");
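`.map(|p| PathBuf::from(p))` wraps a function in a closure that merely forwards its argument; clippy's redundant_closure lint suggests passing the function itself. Sketch:

    use std::path::PathBuf;

    fn main() {
        let arg: Option<&str> = Some("/tmp/daemon");
        // `PathBuf::from` already has the right signature; no adapter
        // closure is needed.
        let dir: Option<PathBuf> = arg.map(PathBuf::from);
        assert_eq!(dir, Some(PathBuf::from("/tmp/daemon")));
    }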
@@ -322,7 +322,7 @@ impl Daemon {
         };
         let network_info = daemon.getnetworkinfo()?;
         info!("{:?}", network_info);
-        if network_info.version < 00_16_00_00 {
+        if network_info.version < 16_00_00 {
             bail!(
                 "{} is not supported - please use bitcoind 0.16+",
                 network_info.subversion,
@@ -330,11 +330,11 @@
         }
         let blockchain_info = daemon.getblockchaininfo()?;
         info!("{:?}", blockchain_info);
-        if blockchain_info.pruned == true {
+        if blockchain_info.pruned {
             bail!("pruned node is not supported (use '-prune=0' bitcoind flag)".to_owned())
         }
         loop {
-            if daemon.getblockchaininfo()?.initialblockdownload == false {
+            if !daemon.getblockchaininfo()?.initialblockdownload {
                 break;
             }
             warn!("wait until bitcoind is synced (i.e. initialblockdownload = false)");
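Two further lints appear in these hunks. The leading zeros in `00_16_00_00` trip zero_prefixed_literal: a leading `0` does not mean octal in Rust (that is spelled `0o`), but it reads as if it might, and dropping it leaves the value unchanged (both literals are 160000, bitcoind's encoding of version 0.16.0). Comparisons like `x == true` / `x == false` trip bool_comparison, which prefers plain `x` / `!x`. Sketch:

    fn main() {
        // Same value, no confusable leading zeros.
        assert_eq!(00_16_00_00, 16_00_00);

        let pruned = false;
        // `pruned == false` states the same thing with more noise.
        assert!(!pruned);
    }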
@@ -585,7 +585,7 @@ impl Daemon {
             .expect("missing height")
             .as_u64()
             .expect("non-numeric height") as usize;
-        let all_heights: Vec<usize> = (0..tip_height + 1).collect();
+        let all_heights: Vec<usize> = (0..=tip_height).collect();
         let chunk_size = 100_000;
         let mut result = vec![];
         let null_hash = Sha256dHash::default();
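`(0..tip_height + 1)` and `(0..=tip_height)` visit the same heights; clippy's range_plus_one lint prefers the inclusive range, which states the intent directly and cannot overflow computing the upper bound. Sketch:

    fn main() {
        let tip_height: usize = 3;
        let a: Vec<usize> = (0..tip_height + 1).collect();
        let b: Vec<usize> = (0..=tip_height).collect();
        assert_eq!(a, b); // both are [0, 1, 2, 3]
    }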
@@ -129,7 +129,7 @@ impl TxRow {
                 code: b'T',
                 txid: full_hash(&txid[..]),
             },
-            height: height,
+            height,
         }
     }
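`height: height` repeats the name on both sides of the colon; since Rust 1.17 the field-init shorthand does the same job, and clippy's redundant_field_names lint flags the long form. Sketch (stand-in types, not the real TxRow):

    struct Entry {
        height: usize,
        hash: u64,
    }

    fn make(height: usize, hash: u64) -> Entry {
        // Shorthand: local variable and field share a name.
        Entry { height, hash }
    }

    fn main() {
        let e = make(42, 0xdead);
        assert_eq!(e.height, 42);
        assert_eq!(e.hash, 0xdead);
    }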
@@ -239,7 +239,7 @@ fn read_indexed_headers(store: &ReadStore) -> HeaderList {
     while blockhash != null_hash {
         let header = map
             .remove(&blockhash)
-            .expect(&format!("missing {} header in DB", blockhash));
+            .unwrap_or_else(|| panic!("missing {} header in DB", blockhash));
         blockhash = header.prev_blockhash;
         headers.push(header);
     }
@@ -363,9 +363,9 @@ impl Index {
             let indexed_headers = self.headers.read().unwrap();
             indexed_headers.order(daemon.get_new_headers(&indexed_headers, &tip)?)
         };
-        new_headers.last().map(|tip| {
-            info!("{:?} ({} left to index)", tip, new_headers.len());
-        });
+        if let Some(latest_header) = new_headers.last() {
+            info!("{:?} ({} left to index)", latest_header, new_headers.len());
+        };
         let height_map = HashMap::<Sha256dHash, usize>::from_iter(
             new_headers.iter().map(|h| (*h.hash(), h.height())),
         );
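Calling `map` purely for its side effect (the closure returns `()`) is flagged by clippy's option_map_unit_fn lint; `if let Some(...)` makes the control flow explicit instead of suggesting a value transformation. Sketch:

    fn main() {
        let new_headers: Vec<u32> = vec![10, 20, 30];
        // Instead of: new_headers.last().map(|h| println!("{}", h));
        if let Some(latest) = new_headers.last() {
            println!("tip candidate: {} ({} to index)", latest, new_headers.len());
        }
    }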
@@ -401,7 +401,7 @@ impl Index {
             let blockhash = block.bitcoin_hash();
             let height = *height_map
                 .get(&blockhash)
-                .expect(&format!("missing header for block {}", blockhash));
+                .unwrap_or_else(|| panic!("missing header for block {}", blockhash));
             let timer = self.stats.start_timer("index");
             let mut block_rows = index_block(block, height);
@@ -33,7 +33,7 @@ impl MempoolStore {
         index_transaction(tx, 0, &mut rows);
         for row in rows {
             let (key, value) = row.into_pair();
-            self.map.entry(key).or_insert(vec![]).push(value);
+            self.map.entry(key).or_insert_with(|| vec![]).push(value);
         }
     }
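`or_insert(vec![])` evaluates its argument on every call, even when the key already exists; `or_insert_with` (clippy's or_fun_call lint) defers construction to the missing-key case. For an empty Vec the saving is negligible, since `Vec::new` does not allocate, but the lint targets the general pattern where the default is expensive. Sketch:

    use std::collections::HashMap;

    fn main() {
        let mut map: HashMap<&str, Vec<u32>> = HashMap::new();
        // The constructor runs only when "key" is absent.
        map.entry("key").or_insert_with(Vec::new).push(1);
        map.entry("key").or_insert_with(Vec::new).push(2);
        assert_eq!(map["key"], vec![1, 2]);
    }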
@@ -46,10 +46,10 @@ impl MempoolStore {
             let values = self
                 .map
                 .get_mut(&key)
-                .expect(&format!("missing key {} in mempool", hex::encode(&key)));
+                .unwrap_or_else(|| panic!("missing key {} in mempool", hex::encode(&key)));
             let last_value = values
                 .pop()
-                .expect(&format!("no values found for key {}", hex::encode(&key)));
+                .unwrap_or_else(|| panic!("no values found for key {}", hex::encode(&key)));
             // TxInRow and TxOutRow have an empty value, TxRow has height=0 as value.
             assert_eq!(
                 value,
@@ -133,7 +133,9 @@ impl Stats {
         for (fee_rate, vsize) in bands {
             // labels should be ordered by fee_rate value
             let label = format!("≤{:10.0}", fee_rate);
-            self.vsize.with_label_values(&[&label]).set(vsize as f64);
+            self.vsize
+                .with_label_values(&[&label])
+                .set(f64::from(vsize));
         }
     }
 }
@@ -251,7 +253,7 @@ impl Tracker {
         let stats = self
             .items
             .remove(txid)
-            .expect(&format!("missing mempool tx {}", txid));
+            .unwrap_or_else(|| panic!("missing mempool tx {}", txid));
         self.index.remove(&stats.tx);
     }
@@ -66,10 +66,12 @@ impl Metrics {
     }

     pub fn start(&self) {
-        let server = tiny_http::Server::http(self.addr).expect(&format!(
-            "failed to start monitoring HTTP server at {}",
-            self.addr
-        ));
+        let server = tiny_http::Server::http(self.addr).unwrap_or_else(|e| {
+            panic!(
+                "failed to start monitoring HTTP server at {}: {}",
+                self.addr, e
+            )
+        });
         start_process_exporter(&self);
         let reg = self.reg.clone();
         spawn_thread("metrics", move || loop {
@@ -82,7 +82,7 @@ impl Status {
             outputs_map.insert((f.txn_id, f.output_index), f);
         }
         for s in self.spending() {
-            if let None = outputs_map.remove(&s.funding_output) {
+            if outputs_map.remove(&s.funding_output).is_none() {
                 warn!("failed to remove {:?}", s.funding_output);
             }
         }
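`if let None = ...` pattern-matches just to test for absence; clippy's redundant_pattern_matching lint prefers `.is_none()`, which reads as the boolean check it is. Sketch:

    use std::collections::HashMap;

    fn main() {
        let mut outputs: HashMap<u32, &str> = HashMap::new();
        outputs.insert(7, "funding");
        if outputs.remove(&9).is_none() {
            println!("failed to remove entry for key 9");
        }
    }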
@@ -128,12 +128,12 @@ fn create_merkle_branch_and_root(
     let mut merkle = vec![];
     while hashes.len() > 1 {
         if hashes.len() % 2 != 0 {
-            let last = hashes.last().unwrap().clone();
+            let last = *hashes.last().unwrap();
             hashes.push(last);
         }
         index = if index % 2 == 0 { index + 1 } else { index - 1 };
         merkle.push(hashes[index]);
-        index = index / 2;
+        index /= 2;
         hashes = hashes
             .chunks(2)
             .map(|pair| merklize(pair[0], pair[1]))
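Two lints in this hunk: `.clone()` on a `Copy` type (the hash) trips clone_on_copy, since dereferencing copies it just as well, and `index = index / 2` trips assign_op_pattern in favor of the compound form. Sketch (a plain byte array standing in for the hash type):

    fn main() {
        let hashes: Vec<[u8; 32]> = vec![[0u8; 32]];
        // [u8; 32] is Copy, so `*` copies the array; no clone() needed.
        let last = *hashes.last().unwrap();
        assert_eq!(last, [0u8; 32]);

        let mut index = 6;
        index /= 2; // instead of `index = index / 2`
        assert_eq!(index, 3);
    }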
@@ -282,7 +282,7 @@ impl Query {
             for (index, output) in t.txn.output.iter().enumerate() {
                 if compute_script_hash(&output.script_pubkey[..]) == script_hash {
                     result.push(FundingOutput {
-                        txn_id: txn_id,
+                        txn_id,
                         height: t.height,
                         output_index: index,
                         value: output.value,
@@ -301,13 +301,11 @@ impl Query {
         let read_store = self.app.read_store();
         let txid_prefixes = txids_by_script_hash(read_store, script_hash);
         // if the limit is enabled
-        if self.txid_limit > 0 {
-            if txid_prefixes.len() > self.txid_limit {
-                bail!(
-                    "{}+ transactions found, query may take a long time",
-                    txid_prefixes.len()
-                );
-            }
+        if self.txid_limit > 0 && txid_prefixes.len() > self.txid_limit {
+            bail!(
+                "{}+ transactions found, query may take a long time",
+                txid_prefixes.len()
+            );
         }
         for t in self.load_txns_by_prefix(read_store, txid_prefixes)? {
             funding.extend(self.find_funding_outputs(&t, script_hash));
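Nested `if`s whose outer body only gates the inner one collapse into a single `&&` condition (clippy's collapsible_if). The semantics are unchanged because `&&` short-circuits: the length check never runs while the limit is disabled. Sketch:

    fn check(limit: usize, found: usize) -> Result<(), String> {
        // One condition instead of two nested ifs.
        if limit > 0 && found > limit {
            return Err(format!("{}+ transactions found", found));
        }
        Ok(())
    }

    fn main() {
        assert!(check(0, 1_000_000).is_ok()); // limit disabled
        assert!(check(100, 5_000).is_err());
    }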
@@ -442,7 +440,7 @@ impl Query {
             );
         }
-        let heights: Vec<usize> = (0..cp_height + 1).collect();
+        let heights: Vec<usize> = (0..=cp_height).collect();
         let header_hashes: Vec<Sha256dHash> = self
             .get_headers(&heights)
             .into_iter()
@@ -140,11 +140,11 @@ impl Connection {
         let branch_vec: Vec<String> = branch.into_iter().map(|b| b.to_hex()).collect();
-        return Ok(json!({
+        Ok(json!({
             "header": raw_header_hex,
             "root": root.to_hex(),
             "branch": branch_vec
-        }));
+        }))
     }

     fn blockchain_block_headers(&self, params: &[Value]) -> Result<Value> {
@@ -506,7 +506,8 @@ impl RPC {
         let chan = Channel::new();
         let acceptor = chan.sender();
         spawn_thread("acceptor", move || {
-            let listener = TcpListener::bind(addr).expect(&format!("bind({}) failed", addr));
+            let listener =
+                TcpListener::bind(addr).unwrap_or_else(|e| panic!("bind({}) failed: {}", addr, e));
             info!("RPC server running on {}", addr);
             loop {
                 let (stream, addr) = listener.accept().expect("accept failed");
@@ -531,7 +532,7 @@ impl RPC {
             )),
         });
         let notification = Channel::new();
-        let handle = RPC {
+        RPC {
             notification: notification.sender(),
             server: Some(spawn_thread("rpc", move || {
                 let senders = Arc::new(Mutex::new(Vec::<SyncSender<Message>>::new()));
@@ -560,8 +561,7 @@ impl RPC {
                 }
                 trace!("RPC connections are closed");
             })),
-        };
-        handle
+        }
     }

     pub fn notify(&self) {
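Binding a value only to return it on the next line trips clippy's let_and_return lint; the struct literal can be the function's tail expression directly. Sketch (a stand-in struct, not the real RPC type):

    struct Rpc {
        port: u16,
    }

    fn new_rpc(port: u16) -> Rpc {
        // Tail expression; no `let handle = ...; handle`.
        Rpc { port }
    }

    fn main() {
        assert_eq!(new_rpc(50001).port, 50001);
    }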
@@ -573,7 +573,9 @@ impl Drop for RPC {
     fn drop(&mut self) {
         trace!("stop accepting new RPCs");
         self.notification.send(Notification::Exit).unwrap();
-        self.server.take().map(|t| t.join().unwrap());
+        if let Some(handle) = self.server.take() {
+            handle.join().unwrap();
+        }
         trace!("RPC server is stopped");
     }
 }
@@ -50,7 +50,7 @@ impl DBStore {
         db_opts.set_write_buffer_size(256 << 20);
         db_opts.set_disable_auto_compactions(opts.bulk_import); // for initial bulk load
         db_opts.set_advise_random_on_open(!opts.bulk_import); // bulk load uses sequential I/O
-        if opts.low_memory == false {
+        if !opts.low_memory {
             db_opts.set_compaction_readahead_size(1 << 20);
         }
@@ -73,7 +73,7 @@ impl DBStore {
     pub fn enable_compaction(self) -> Self {
         let mut opts = self.opts.clone();
-        if opts.bulk_import == true {
+        if opts.bulk_import {
             opts.bulk_import = false;
             info!("enabling auto-compactions");
             let opts = [("disable_auto_compactions", "false")];
@@ -20,11 +20,11 @@ pub type FullHash = [u8; HASH_LEN];
 pub type HashPrefix = [u8; HASH_PREFIX_LEN];

 pub fn hash_prefix(hash: &[u8]) -> HashPrefix {
-    array_ref![hash, 0, HASH_PREFIX_LEN].clone()
+    *array_ref![hash, 0, HASH_PREFIX_LEN]
 }

 pub fn full_hash(hash: &[u8]) -> FullHash {
-    array_ref![hash, 0, HASH_LEN].clone()
+    *array_ref![hash, 0, HASH_LEN]
 }

 #[derive(Eq, PartialEq, Clone)]
@@ -50,9 +50,8 @@ impl HeaderEntry {
 impl fmt::Debug for HeaderEntry {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let last_block_time = time::at_utc(time::Timespec::new(self.header().time as i64, 0))
-            .rfc3339()
-            .to_string();
+        let spec = time::Timespec::new(i64::from(self.header().time), 0);
+        let last_block_time = time::at_utc(spec).rfc3339().to_string();
         write!(
             f,
             "best={} height={} @ {}",
@@ -104,14 +103,14 @@ impl HeaderList {
             0
         } else {
             self.header_by_blockhash(&prev_blockhash)
-                .expect(&format!("{} is not part of the blockchain", prev_blockhash))
+                .unwrap_or_else(|| panic!("{} is not part of the blockchain", prev_blockhash))
                 .height()
                 + 1
         };
         (new_height..)
             .zip(hashed_headers.into_iter())
             .map(|(height, hashed_header)| HeaderEntry {
-                height: height,
+                height,
                 hash: hashed_header.blockhash,
                 header: hashed_header.header,
             })
@@ -179,10 +178,7 @@ impl HeaderList {
     pub fn tip(&self) -> &Sha256dHash {
         assert_eq!(
             self.tip,
-            self.headers
-                .last()
-                .map(|h| *h.hash())
-                .unwrap_or(Sha256dHash::default())
+            self.headers.last().map(|h| *h.hash()).unwrap_or_default()
         );
         &self.tip
     }
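`unwrap_or(Sha256dHash::default())` constructs the fallback eagerly even when the Option is Some; clippy's or_fun_call lint special-cases default construction and suggests the shorter `unwrap_or_default()`, which defers to the type's Default impl only when needed. Sketch (a byte array standing in for the hash type):

    fn main() {
        let tip: Option<[u8; 32]> = None;
        // Falls back to <[u8; 32]>::default(), i.e. all zeros.
        let hash = tip.unwrap_or_default();
        assert_eq!(hash, [0u8; 32]);
    }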