fix: clippy lint (#80)
* clippy(merge_iterator): fix `clippy::non_canonical_partial_ord_impl` lint.
* clippy(bloom): fix `clippy::manual_clamp` lint.
* clippy(compact): fix `clippy::assigning_clones` lint.
* clippy(key): fix `clippy::legacy_numeric_constants` lint.
* clippy(mem_table): fix `clippy::missing_transmute_annotations` lint.
@@ -330,7 +330,7 @@ impl LsmStorageInner {
                 assert!(result.is_none());
             }
             assert_eq!(l1_sstables, state.levels[0].1);
-            state.levels[0].1 = ids.clone();
+            state.levels[0].1.clone_from(&ids);
             let mut l0_sstables_map = l0_sstables.iter().copied().collect::<HashSet<_>>();
             state.l0_sstables = state
                 .l0_sstables
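Note on the `assigning_clones` fix: `clippy::assigning_clones` flags `a = b.clone()` when `a` already holds a value, because `Clone::clone_from` can reuse `a`'s existing allocation instead of building a fresh clone and dropping the old one. A minimal sketch of the same pattern on a plain `Vec<usize>`, mirroring the SST id lists above:

    fn main() {
        let ids = vec![1usize, 2, 3];
        let mut level = vec![4usize, 5, 6];
        // `level = ids.clone();` would allocate a new Vec, then drop the old buffer.
        // `clone_from` copies into the existing buffer when its capacity suffices.
        level.clone_from(&ids);
        assert_eq!(level, ids);
    }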
@@ -12,27 +12,25 @@ struct HeapWrapper<I: StorageIterator>(pub usize, pub Box<I>);
 
 impl<I: StorageIterator> PartialEq for HeapWrapper<I> {
     fn eq(&self, other: &Self) -> bool {
-        self.partial_cmp(other).unwrap() == cmp::Ordering::Equal
+        self.cmp(other) == cmp::Ordering::Equal
     }
 }
 
 impl<I: StorageIterator> Eq for HeapWrapper<I> {}
 
 impl<I: StorageIterator> PartialOrd for HeapWrapper<I> {
-    #[allow(clippy::non_canonical_partial_ord_impl)]
     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-        match self.1.key().cmp(&other.1.key()) {
-            cmp::Ordering::Greater => Some(cmp::Ordering::Greater),
-            cmp::Ordering::Less => Some(cmp::Ordering::Less),
-            cmp::Ordering::Equal => self.0.partial_cmp(&other.0),
-        }
-        .map(|x| x.reverse())
+        Some(self.cmp(other))
     }
 }
 
 impl<I: StorageIterator> Ord for HeapWrapper<I> {
     fn cmp(&self, other: &Self) -> cmp::Ordering {
-        self.partial_cmp(other).unwrap()
+        self.1
+            .key()
+            .cmp(&other.1.key())
+            .then(self.0.cmp(&other.0))
+            .reverse()
     }
 }
 
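Note on the `non_canonical_partial_ord_impl` fix: when a type implements both `Ord` and `PartialOrd`, clippy wants `partial_cmp` to be exactly `Some(self.cmp(other))` so the two orderings cannot drift apart; all comparison logic moves into `Ord::cmp`, as the hunk above does. A self-contained sketch of the same idea with a hypothetical `Wrapper`, a plain `(index, key)` pair standing in for `HeapWrapper`'s boxed iterator:

    use std::cmp;
    use std::collections::BinaryHeap;

    // Hypothetical stand-in for HeapWrapper: (iterator index, current key).
    #[derive(PartialEq, Eq)]
    struct Wrapper(usize, Vec<u8>);

    impl PartialOrd for Wrapper {
        fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
            Some(self.cmp(other)) // canonical form: delegate to Ord
        }
    }

    impl Ord for Wrapper {
        fn cmp(&self, other: &Self) -> cmp::Ordering {
            // Order by key, break ties by index; reverse() turns std's
            // max-heap (BinaryHeap) into a min-heap on (key, index).
            self.1.cmp(&other.1).then(self.0.cmp(&other.0)).reverse()
        }
    }

    fn main() {
        let mut heap = BinaryHeap::new();
        heap.push(Wrapper(1, b"banana".to_vec()));
        heap.push(Wrapper(0, b"apple".to_vec()));
        assert_eq!(heap.pop().unwrap().1, b"apple".to_vec()); // smallest key first
    }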
@@ -14,10 +14,10 @@ pub const TS_ENABLED: bool = true;
 /// Temporary, should remove after implementing full week 3 day 1 + 2.
 pub const TS_DEFAULT: u64 = 0;
 
-pub const TS_MAX: u64 = std::u64::MAX;
-pub const TS_MIN: u64 = std::u64::MIN;
-pub const TS_RANGE_BEGIN: u64 = std::u64::MAX;
-pub const TS_RANGE_END: u64 = std::u64::MIN;
+pub const TS_MAX: u64 = u64::MAX;
+pub const TS_MIN: u64 = u64::MIN;
+pub const TS_RANGE_BEGIN: u64 = u64::MAX;
+pub const TS_RANGE_END: u64 = u64::MIN;
 
 impl<T: AsRef<[u8]>> Key<T> {
     pub fn into_inner(self) -> T {
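Note on the `legacy_numeric_constants` fix: `std::u64::MAX` and friends are the old module-level constants; the associated constants (`u64::MAX`, stable since Rust 1.43) superseded them, and the module-level forms are now deprecated. The values are identical, so this is a pure spelling change:

    fn main() {
        #[allow(deprecated)] // module-level constant, kept only for comparison
        let legacy = std::u64::MAX;
        assert_eq!(legacy, u64::MAX); // associated constant, the preferred form
        assert_eq!(u64::MIN, 0);
    }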
@@ -93,7 +93,7 @@ impl MemTable {
     /// Get a value by key. Should not be used in week 3.
     pub fn get(&self, key: KeySlice) -> Option<Bytes> {
         let key_bytes = KeyBytes::from_bytes_with_ts(
-            Bytes::from_static(unsafe { std::mem::transmute(key.key_ref()) }),
+            Bytes::from_static(unsafe { std::mem::transmute::<&[u8], &[u8]>(key.key_ref()) }),
             key.ts(),
         );
         self.map.get(&key_bytes).map(|e| e.value().clone())
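Note on the `missing_transmute_annotations` fix: clippy asks for both type parameters on `mem::transmute`, since a silently inferred type that later changes elsewhere would change what the transmute does. Here the transmute only erases the slice's lifetime so that `Bytes::from_static` accepts it. A sketch of that pattern in isolation, with a hypothetical helper name, assuming the caller keeps the backing data alive for the duration of use:

    /// Pretend a byte slice is 'static.
    ///
    /// Safety: the caller must ensure the underlying bytes outlive every
    /// use of the returned slice (in the memtable, the skiplist lookup).
    unsafe fn as_static(bytes: &[u8]) -> &'static [u8] {
        // Annotated form, as the lint requires: both sides spelled out,
        // making it visible that only the lifetime changes.
        unsafe { std::mem::transmute::<&[u8], &'static [u8]>(bytes) }
    }

    fn main() {
        let owned = vec![1u8, 2, 3];
        let s = unsafe { as_static(&owned) };
        assert_eq!(s, &[1, 2, 3]); // used strictly while `owned` is alive
    }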
@@ -79,7 +79,7 @@ impl Bloom {
     /// Build bloom filter from key hashes
     pub fn build_from_key_hashes(keys: &[u32], bits_per_key: usize) -> Self {
         let k = (bits_per_key as f64 * 0.69) as u32;
-        let k = k.min(30).max(1);
+        let k = k.clamp(1, 30);
         let nbits = (keys.len() * bits_per_key).max(64);
         let nbytes = (nbits + 7) / 8;
         let nbits = nbytes * 8;
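Note on the `manual_clamp` fix: `k.min(30).max(1)` and `k.clamp(1, 30)` compute the same value for every `u32`, but `clamp` states the bounds in (low, high) order and panics if low > high instead of silently mis-ordering them. A quick equivalence check:

    fn main() {
        for k in [0u32, 1, 15, 30, 31, 1_000] {
            assert_eq!(k.min(30).max(1), k.clamp(1, 30));
        }
        // Intent: keep the number of bloom hash functions within [1, 30].
    }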
@@ -14,27 +14,25 @@ struct HeapWrapper<I: StorageIterator>(pub usize, pub Box<I>);
 
 impl<I: StorageIterator> PartialEq for HeapWrapper<I> {
     fn eq(&self, other: &Self) -> bool {
-        self.partial_cmp(other).unwrap() == cmp::Ordering::Equal
+        self.cmp(other) == cmp::Ordering::Equal
     }
 }
 
 impl<I: StorageIterator> Eq for HeapWrapper<I> {}
 
 impl<I: StorageIterator> PartialOrd for HeapWrapper<I> {
-    #[allow(clippy::non_canonical_partial_ord_impl)]
     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-        match self.1.key().cmp(&other.1.key()) {
-            cmp::Ordering::Greater => Some(cmp::Ordering::Greater),
-            cmp::Ordering::Less => Some(cmp::Ordering::Less),
-            cmp::Ordering::Equal => self.0.partial_cmp(&other.0),
-        }
-        .map(|x| x.reverse())
+        Some(self.cmp(other))
     }
 }
 
 impl<I: StorageIterator> Ord for HeapWrapper<I> {
     fn cmp(&self, other: &Self) -> cmp::Ordering {
-        self.partial_cmp(other).unwrap()
+        self.1
+            .key()
+            .cmp(&other.1.key())
+            .then(self.0.cmp(&other.0))
+            .reverse()
     }
 }
 
@@ -72,7 +72,7 @@ impl Bloom {
     /// Build bloom filter from key hashes
     pub fn build_from_key_hashes(keys: &[u32], bits_per_key: usize) -> Self {
         let k = (bits_per_key as f64 * 0.69) as u32;
-        let k = k.min(30).max(1);
+        let k = k.clamp(1, 30);
         let nbits = (keys.len() * bits_per_key).max(64);
         let nbytes = (nbits + 7) / 8;
         let nbits = nbytes * 8;
@@ -284,7 +284,7 @@ impl LsmStorageInner {
                 assert!(result.is_none());
             }
             assert_eq!(l1_sstables, state.levels[0].1);
-            state.levels[0].1 = ids.clone();
+            state.levels[0].1.clone_from(&ids);
             let mut l0_sstables_map = l0_sstables.iter().copied().collect::<HashSet<_>>();
             state.l0_sstables = state
                 .l0_sstables
@@ -12,27 +12,25 @@ struct HeapWrapper<I: StorageIterator>(pub usize, pub Box<I>);
 
 impl<I: StorageIterator> PartialEq for HeapWrapper<I> {
     fn eq(&self, other: &Self) -> bool {
-        self.partial_cmp(other).unwrap() == cmp::Ordering::Equal
+        self.cmp(other) == cmp::Ordering::Equal
     }
 }
 
 impl<I: StorageIterator> Eq for HeapWrapper<I> {}
 
 impl<I: StorageIterator> PartialOrd for HeapWrapper<I> {
-    #[allow(clippy::non_canonical_partial_ord_impl)]
     fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
-        match self.1.key().cmp(&other.1.key()) {
-            cmp::Ordering::Greater => Some(cmp::Ordering::Greater),
-            cmp::Ordering::Less => Some(cmp::Ordering::Less),
-            cmp::Ordering::Equal => self.0.partial_cmp(&other.0),
-        }
-        .map(|x| x.reverse())
+        Some(self.cmp(other))
     }
 }
 
 impl<I: StorageIterator> Ord for HeapWrapper<I> {
     fn cmp(&self, other: &Self) -> cmp::Ordering {
-        self.partial_cmp(other).unwrap()
+        self.1
+            .key()
+            .cmp(&other.1.key())
+            .then(self.0.cmp(&other.0))
+            .reverse()
     }
 }
 
@@ -79,7 +79,7 @@ impl Bloom {
     /// Build bloom filter from key hashes
     pub fn build_from_key_hashes(keys: &[u32], bits_per_key: usize) -> Self {
         let k = (bits_per_key as f64 * 0.69) as u32;
-        let k = k.min(30).max(1);
+        let k = k.clamp(1, 30);
         let nbits = (keys.len() * bits_per_key).max(64);
         let nbytes = (nbits + 7) / 8;
         let nbits = nbytes * 8;