Make clippy "happy". (#1950)
* Make clippy "happy".

Address all clippy complaints, except those that are purely stylistic or
prone to false positives in corner cases. Ignore all "style" and "pedantic" lints.

* Fix tests.

* Undo unnecessary API change.
romanb authored Feb 15, 2021
1 parent 12557a3 commit 6499e92
Showing 43 changed files with 239 additions and 263 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -138,7 +138,7 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: clippy
- args: -- -A clippy::mutable_key_type -A clippy::type_complexity
+ args: -- -A clippy::type_complexity -A clippy::pedantic -A clippy::style

run-benchmarks:
runs-on: ubuntu-latest
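Note: `clippy::pedantic` and `clippy::style` name lint groups rather than individual lints, so a single `-A` flag silences each group wholesale. A minimal sketch (not part of this commit) of an equivalent crate-level configuration:

    // Hypothetical alternative: allow the same lints at the crate root
    // instead of on the clippy command line.
    #![allow(clippy::type_complexity)]
    #![allow(clippy::pedantic)] // an entire lint group
    #![allow(clippy::style)]    // an entire lint group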
17 changes: 9 additions & 8 deletions core/src/connection/pool.rs
@@ -182,13 +182,13 @@ where
},
PoolEvent::ConnectionEvent { ref connection, ref event } => {
f.debug_struct("PoolEvent::ConnectionEvent")
- .field("peer", connection.peer_id())
+ .field("peer", &connection.peer_id())
.field("event", event)
.finish()
},
PoolEvent::AddressChange { ref connection, ref new_endpoint, ref old_endpoint } => {
f.debug_struct("PoolEvent::AddressChange")
- .field("peer", connection.peer_id())
+ .field("peer", &connection.peer_id())
.field("new_endpoint", new_endpoint)
.field("old_endpoint", old_endpoint)
.finish()
@@ -325,8 +325,8 @@ impl<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
// "established" connection.
let future = future.and_then({
let endpoint = endpoint.clone();
- let expected_peer = peer.clone();
- let local_id = self.local_id.clone();
+ let expected_peer = peer;
+ let local_id = self.local_id;
move |(peer_id, muxer)| {
if let Some(peer) = expected_peer {
if peer != peer_id {
@@ -376,7 +376,7 @@ impl<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
self.counters.check_max_established_per_peer(self.num_peer_established(&i.peer_id))?;
let id = self.manager.add(c, i.clone());
self.counters.inc_established(&i.endpoint);
- self.established.entry(i.peer_id.clone()).or_default().insert(id, i.endpoint);
+ self.established.entry(i.peer_id).or_default().insert(id, i.endpoint);
Ok(id)
}

@@ -667,7 +667,7 @@ impl<TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
}

// Add the connection to the pool.
- let peer = entry.connected().peer_id.clone();
+ let peer = entry.connected().peer_id;
let conns = self.established.entry(peer).or_default();
let num_established = NonZeroU32::new(u32::try_from(conns.len() + 1).unwrap())
.expect("n + 1 is always non-zero; qed");
@@ -786,8 +786,8 @@ impl<TInEvent> EstablishedConnection<'_, TInEvent> {
}

/// Returns the identity of the connected peer.
- pub fn peer_id(&self) -> &PeerId {
-     &self.entry.connected().peer_id
+ pub fn peer_id(&self) -> PeerId {
+     self.entry.connected().peer_id
}

/// Returns the local connection ID.
@@ -842,6 +842,7 @@ where
I: Iterator<Item = ConnectionId>
{
/// Obtains the next connection, if any.
+ #[allow(clippy::should_implement_trait)]
pub fn next(&mut self) -> Option<EstablishedConnection<'_, TInEvent>>
{
while let Some(id) = self.ids.next() {
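Most changes in this file drop `.clone()` calls on `PeerId`, which clippy's `clone_on_copy` lint flags as redundant because `PeerId` implements `Copy` at this point (the diff copies it straight out of borrows, e.g. `entry.connected().peer_id`). The same fact lets `peer_id()` return the ID by value, which is why the `Debug` impl above now borrows it with `&connection.peer_id()`. A minimal sketch of the pattern, using a simplified stand-in type:

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct PeerId(u64); // simplified stand-in for the real libp2p PeerId

    struct Connected {
        peer_id: PeerId,
    }

    impl Connected {
        // Returning the `Copy` type by value frees the caller from the
        // lifetime of `&self`; writing `self.peer_id.clone()` here would
        // trip clone_on_copy.
        fn peer_id(&self) -> PeerId {
            self.peer_id
        }
    }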
9 changes: 4 additions & 5 deletions core/src/network.rs
@@ -144,11 +144,10 @@ where
local_peer_id: PeerId,
config: NetworkConfig,
) -> Self {
- let pool_local_id = local_peer_id.clone();
Network {
local_peer_id,
listeners: ListenersStream::new(transport),
- pool: Pool::new(pool_local_id, config.manager_config, config.limits),
+ pool: Pool::new(local_peer_id, config.manager_config, config.limits),
dialing: Default::default(),
}
}
@@ -380,7 +379,7 @@ where
let event = match self.pool.poll(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(PoolEvent::ConnectionEstablished { connection, num_established }) => {
- if let hash_map::Entry::Occupied(mut e) = self.dialing.entry(connection.peer_id().clone()) {
+ if let hash_map::Entry::Occupied(mut e) = self.dialing.entry(connection.peer_id()) {
e.get_mut().retain(|s| s.current.0 != connection.id());
if e.get().is_empty() {
e.remove();
@@ -526,7 +525,7 @@ where
if let Some(pos) = attempts.iter().position(|s| s.current.0 == id) {
let attempt = attempts.remove(pos);
let last = attempts.is_empty();
- Some((peer.clone(), attempt, last))
+ Some((*peer, attempt, last))
} else {
None
}
@@ -545,7 +544,7 @@ where
if let Some(handler) = handler {
let next_attempt = attempt.remaining.remove(0);
let opts = DialingOpts {
- peer: peer_id.clone(),
+ peer: peer_id,
handler,
address: next_attempt,
remaining: attempt.remaining
11 changes: 6 additions & 5 deletions core/src/network/peer.rs
@@ -223,7 +223,7 @@ where
};

let id = network.dial_peer(DialingOpts {
- peer: peer_id.clone(),
+ peer: peer_id,
handler,
address,
remaining: remaining.into_iter().collect(),
@@ -435,7 +435,7 @@ where
pub fn attempt(&mut self, id: ConnectionId)
-> Option<DialingAttempt<'_, TInEvent>>
{
- if let hash_map::Entry::Occupied(attempts) = self.network.dialing.entry(self.peer_id.clone()) {
+ if let hash_map::Entry::Occupied(attempts) = self.network.dialing.entry(self.peer_id) {
if let Some(pos) = attempts.get().iter().position(|s| s.current.0 == id) {
if let Some(inner) = self.network.pool.get_outgoing(id) {
return Some(DialingAttempt { pos, inner, attempts })
@@ -662,7 +662,8 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
}

/// Obtains the next dialing connection, if any.
- pub fn next<'b>(&'b mut self) -> Option<DialingAttempt<'b, TInEvent>> {
+ #[allow(clippy::should_implement_trait)]
+ pub fn next(&mut self) -> Option<DialingAttempt<'_, TInEvent>> {
// If the number of elements reduced, the current `DialingAttempt` has been
// aborted and iteration needs to continue from the previous position to
// account for the removed element.
@@ -676,7 +677,7 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
return None
}

- if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) {
+ if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(*self.peer_id) {
let id = attempts.get()[self.pos].current.0;
if let Some(inner) = self.pool.get_outgoing(id) {
let conn = DialingAttempt { pos: self.pos, inner, attempts };
@@ -697,7 +698,7 @@ impl<'a, TInEvent, TOutEvent, THandler, TTransErr, THandlerErr>
return None
}

- if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(self.peer_id.clone()) {
+ if let hash_map::Entry::Occupied(attempts) = self.dialing.entry(*self.peer_id) {
let id = attempts.get()[self.pos].current.0;
if let Some(inner) = self.pool.get_outgoing(id) {
return Some(DialingAttempt { pos: self.pos, inner, attempts })
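The `#[allow(clippy::should_implement_trait)]` attributes added here and in `pool.rs` silence the lint that fires when an inherent method is named `next`, since callers might expect an `Iterator` implementation instead. These methods lend out values that borrow from `&mut self`, which the `Iterator` trait cannot express, so the inherent method is kept. A minimal sketch with hypothetical names:

    struct Attempts {
        ids: Vec<u32>,
        pos: usize,
    }

    impl Attempts {
        // `Iterator::next` would require an `Item` type independent of the
        // borrow of `self`; this "lending" accessor cannot provide that,
        // hence the inherent method plus the allow.
        #[allow(clippy::should_implement_trait)]
        fn next(&mut self) -> Option<&u32> {
            let item = self.ids.get(self.pos);
            self.pos += 1;
            item
        }
    }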
2 changes: 1 addition & 1 deletion core/tests/network_dial_error.rs
@@ -69,7 +69,7 @@ fn deny_incoming_connec() {
multiaddr,
error: PendingConnectionError::Transport(_)
}) => {
- assert_eq!(peer_id, *swarm1.local_peer_id());
+ assert_eq!(&peer_id, swarm1.local_peer_id());
assert_eq!(multiaddr, address);
return Poll::Ready(Ok(()));
},
30 changes: 14 additions & 16 deletions muxers/mplex/src/io.rs
@@ -321,7 +321,7 @@ where

// Remove the substream, scheduling pending frames as necessary.
match self.substreams.remove(&id) {
- None => return,
+ None => {},
Some(state) => {
// If we fell below the substream limit, notify tasks that had
// interest in opening an outbound substream earlier.
@@ -442,7 +442,7 @@ where
// Read the next frame.
match ready!(self.poll_read_frame(cx, Some(id)))? {
Frame::Data { data, stream_id } if stream_id.into_local() == id => {
- return Poll::Ready(Ok(Some(data.clone())))
+ return Poll::Ready(Ok(Some(data)))
},
Frame::Data { stream_id, data } => {
// The data frame is for a different stream than the one
@@ -595,18 +595,16 @@ where
// this task again to have a chance at progress.
trace!("{}: No task to read from blocked stream. Waking current task.", self.id);
cx.waker().clone().wake();
- } else {
-     if let Some(id) = stream_id {
-         // We woke some other task, but are still interested in
-         // reading `Data` frames from the current stream when unblocked.
-         debug_assert!(blocked_id != &id, "Unexpected attempt at reading a new \
-             frame from a substream with a full buffer.");
-         let _ = NotifierRead::register_read_stream(&self.notifier_read, cx.waker(), id);
-     } else {
-         // We woke some other task but are still interested in
-         // reading new `Open` frames when unblocked.
-         let _ = NotifierRead::register_next_stream(&self.notifier_read, cx.waker());
-     }
+ } else if let Some(id) = stream_id {
+     // We woke some other task, but are still interested in
+     // reading `Data` frames from the current stream when unblocked.
+     debug_assert!(blocked_id != &id, "Unexpected attempt at reading a new \
+         frame from a substream with a full buffer.");
+     let _ = NotifierRead::register_read_stream(&self.notifier_read, cx.waker(), id);
+ } else {
+     // We woke some other task but are still interested in
+     // reading new `Open` frames when unblocked.
+     let _ = NotifierRead::register_next_stream(&self.notifier_read, cx.waker());
}

return Poll::Pending
@@ -932,7 +930,7 @@ impl NotifierRead {

impl ArcWake for NotifierRead {
fn wake_by_ref(this: &Arc<Self>) {
- let wakers = mem::replace(&mut *this.read_stream.lock(), Default::default());
+ let wakers = mem::take(&mut *this.read_stream.lock());
for (_, waker) in wakers {
waker.wake();
}
@@ -963,7 +961,7 @@ impl NotifierWrite {

impl ArcWake for NotifierWrite {
fn wake_by_ref(this: &Arc<Self>) {
- let wakers = mem::replace(&mut *this.pending.lock(), Default::default());
+ let wakers = mem::take(&mut *this.pending.lock());
for waker in wakers {
waker.wake();
}
@@ -985,7 +983,7 @@ impl NotifierOpen {
}

fn wake_all(&mut self) {
- let wakers = mem::replace(&mut self.pending, Default::default());
+ let wakers = mem::take(&mut self.pending);
for waker in wakers {
waker.wake();
}
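Two fixes recur in this file: `mem::replace(&mut x, Default::default())` becomes the equivalent `mem::take(&mut x)` (clippy's `mem_replace_with_default`), and an `if` that is the only statement of an `else` block is collapsed into `else if`. A minimal sketch of the `mem::take` pattern:

    use std::mem;
    use std::task::Waker;

    fn wake_all(pending: &mut Vec<Waker>) {
        // Moves the wakers out and leaves an empty Vec in place, exactly
        // like `mem::replace(pending, Default::default())`, but clearer.
        for waker in mem::take(pending) {
            waker.wake();
        }
    }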
2 changes: 1 addition & 1 deletion muxers/mplex/src/lib.rs
@@ -106,7 +106,7 @@ where
-> Poll<Result<Self::Substream, io::Error>>
{
let stream_id = ready!(self.io.lock().poll_open_stream(cx))?;
- return Poll::Ready(Ok(Substream::new(stream_id)))
+ Poll::Ready(Ok(Substream::new(stream_id)))
}

fn destroy_outbound(&self, _substream: Self::OutboundSubstream) {
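This one is clippy's `needless_return`: the trailing expression of a function body is already its return value, so the explicit `return` is redundant. A minimal illustration:

    fn plus_one(x: u32) -> u32 {
        // Tail expression; `return x + 1;` would trigger needless_return.
        x + 1
    }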
25 changes: 12 additions & 13 deletions protocols/floodsub/src/layer.rs
@@ -34,7 +34,6 @@ use libp2p_swarm::{
DialPeerCondition,
};
use log::warn;
- use rand;
use smallvec::SmallVec;
use std::{collections::VecDeque, iter};
use std::collections::hash_map::{DefaultHasher, HashMap};
@@ -89,7 +88,7 @@ impl Floodsub {
if self.connected_peers.contains_key(&peer_id) {
for topic in self.subscribed_topics.iter().cloned() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
- peer_id: peer_id.clone(),
+ peer_id,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@@ -102,7 +101,7 @@ impl Floodsub {
}
}

- if self.target_peers.insert(peer_id.clone()) {
+ if self.target_peers.insert(peer_id) {
self.events.push_back(NetworkBehaviourAction::DialPeer {
peer_id, condition: DialPeerCondition::Disconnected
});
@@ -125,7 +124,7 @@ impl Floodsub {

for peer in self.connected_peers.keys() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
- peer_id: peer.clone(),
+ peer_id: *peer,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@@ -156,7 +155,7 @@ impl Floodsub {

for peer in self.connected_peers.keys() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
- peer_id: peer.clone(),
+ peer_id: *peer,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@@ -196,7 +195,7 @@ impl Floodsub {

fn publish_many_inner(&mut self, topic: impl IntoIterator<Item = impl Into<Topic>>, data: impl Into<Vec<u8>>, check_self_subscriptions: bool) {
let message = FloodsubMessage {
- source: self.config.local_peer_id.clone(),
+ source: self.config.local_peer_id,
data: data.into(),
// If the sequence numbers are predictable, then an attacker could flood the network
// with packets with the predetermined sequence numbers and absorb our legitimate
@@ -231,7 +230,7 @@ impl Floodsub {
}

self.events.push_back(NetworkBehaviourAction::NotifyHandler {
- peer_id: peer_id.clone(),
+ peer_id: *peer_id,
handler: NotifyHandler::Any,
event: FloodsubRpc {
subscriptions: Vec::new(),
@@ -259,7 +258,7 @@ impl NetworkBehaviour for Floodsub {
if self.target_peers.contains(id) {
for topic in self.subscribed_topics.iter().cloned() {
self.events.push_back(NetworkBehaviourAction::NotifyHandler {
- peer_id: id.clone(),
+ peer_id: *id,
handler: NotifyHandler::Any,
event: FloodsubRpc {
messages: Vec::new(),
@@ -272,7 +271,7 @@ impl NetworkBehaviour for Floodsub {
}
}

- self.connected_peers.insert(id.clone(), SmallVec::new());
+ self.connected_peers.insert(*id, SmallVec::new());
}

fn inject_disconnected(&mut self, id: &PeerId) {
@@ -283,7 +282,7 @@ impl NetworkBehaviour for Floodsub {
// try to reconnect.
if self.target_peers.contains(id) {
self.events.push_back(NetworkBehaviourAction::DialPeer {
- peer_id: id.clone(),
+ peer_id: *id,
condition: DialPeerCondition::Disconnected
});
}
@@ -312,7 +311,7 @@ impl NetworkBehaviour for Floodsub {
remote_peer_topics.push(subscription.topic.clone());
}
self.events.push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Subscribed {
- peer_id: propagation_source.clone(),
+ peer_id: propagation_source,
topic: subscription.topic,
}));
}
@@ -321,7 +320,7 @@ impl NetworkBehaviour for Floodsub {
remote_peer_topics.remove(pos);
}
self.events.push_back(NetworkBehaviourAction::GenerateEvent(FloodsubEvent::Unsubscribed {
- peer_id: propagation_source.clone(),
+ peer_id: propagation_source,
topic: subscription.topic,
}));
}
@@ -364,7 +363,7 @@ impl NetworkBehaviour for Floodsub {
if let Some(pos) = rpcs_to_dispatch.iter().position(|(p, _)| p == peer_id) {
rpcs_to_dispatch[pos].1.messages.push(message.clone());
} else {
- rpcs_to_dispatch.push((peer_id.clone(), FloodsubRpc {
+ rpcs_to_dispatch.push((*peer_id, FloodsubRpc {
subscriptions: Vec::new(),
messages: vec![message.clone()],
}));
6 changes: 3 additions & 3 deletions protocols/gossipsub/src/backoff.rs
@@ -78,7 +78,7 @@ impl BackoffStorage {
backoffs_by_heartbeat: &mut Vec<HashSet<_>>,
heartbeat_interval,
backoff_slack| {
- let pair = (topic.clone(), peer.clone());
+ let pair = (topic.clone(), *peer);
let index = (heartbeat_index.0
+ Self::heartbeats(&time, heartbeat_interval)
+ backoff_slack as usize)
@@ -90,12 +90,12 @@ impl BackoffStorage {
.backoffs
.entry(topic.clone())
.or_insert_with(HashMap::new)
- .entry(peer.clone())
+ .entry(*peer)
{
Entry::Occupied(mut o) => {
let (backoff, index) = o.get();
if backoff < &instant {
- let pair = (topic.clone(), peer.clone());
+ let pair = (topic.clone(), *peer);
if let Some(s) = self.backoffs_by_heartbeat.get_mut(index.0) {
s.remove(&pair);
}
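As in the floodsub changes, borrowed `PeerId` keys are now dereferenced (`*peer`) to copy them, while non-`Copy` values such as topics still need an explicit `.clone()`. A minimal sketch with simplified stand-in types:

    use std::collections::HashMap;

    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct PeerId(u64); // stand-in: the real PeerId is Copy here

    #[derive(Clone)]
    struct Topic(String); // stand-in: Topic is Clone but not Copy

    fn pairs(peers: &HashMap<PeerId, u32>, topic: &Topic) -> Vec<(Topic, PeerId)> {
        peers
            .keys()
            // `*peer` copies the key out of the borrow; the topic is cloned
            // per pair, mirroring `(topic.clone(), *peer)` in the diff above.
            .map(|peer| (topic.clone(), *peer))
            .collect()
    }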
(Diff truncated; the remaining changed files are not shown.)
