From ce9442e9b85611be14f18cd7b1026099a0228c75 Mon Sep 17 00:00:00 2001
From: Alex Page
Date: Thu, 22 Jun 2023 02:11:57 -0400
Subject: [PATCH] Run clippy --fix

---
 src/confid/black_box.rs | 86 +++++++++++++++++------------------------
 1 file changed, 36 insertions(+), 50 deletions(-)

diff --git a/src/confid/black_box.rs b/src/confid/black_box.rs
index d05f6a6..cdebd12 100644
--- a/src/confid/black_box.rs
+++ b/src/confid/black_box.rs
@@ -820,22 +820,19 @@ unsafe fn sha1_single_block(input: *mut u8, output: *mut u8) {
     let mut w: [u32; 80] = [0; 80];
     let mut i = 0_i32 as usize;
     while i < 16 {
-        w[i as usize] = ((*input.offset((4 as usize).wrapping_mul(i) as isize) as i32) << 24_i32
-            | (*input.offset((4 as usize).wrapping_mul(i).wrapping_add(1) as isize) as i32)
-                << 16_i32
-            | (*input.offset((4 as usize).wrapping_mul(i).wrapping_add(2) as isize) as i32)
-                << 8_i32
-            | *input.offset((4 as usize).wrapping_mul(i).wrapping_add(3) as isize) as i32)
-            as u32;
+        w[i] = ((*input.add(4_usize.wrapping_mul(i)) as i32) << 24_i32
+            | (*input.add(4_usize.wrapping_mul(i).wrapping_add(1)) as i32) << 16_i32
+            | (*input.add(4_usize.wrapping_mul(i).wrapping_add(2)) as i32) << 8_i32
+            | *input.add(4_usize.wrapping_mul(i).wrapping_add(3)) as i32) as u32;
         i = i.wrapping_add(1);
     }
     i = 16_i32 as usize;
     while i < 80 {
-        w[i as usize] = rol(
-            w[i.wrapping_sub(3) as usize]
-                ^ w[i.wrapping_sub(8) as usize]
-                ^ w[i.wrapping_sub(14) as usize]
-                ^ w[i.wrapping_sub(16) as usize],
+        w[i] = rol(
+            w[i.wrapping_sub(3)]
+                ^ w[i.wrapping_sub(8)]
+                ^ w[i.wrapping_sub(14)]
+                ^ w[i.wrapping_sub(16)],
             1_i32,
         );
         i = i.wrapping_add(1);
@@ -845,7 +842,7 @@ unsafe fn sha1_single_block(input: *mut u8, output: *mut u8) {
         let tmp: u32 = (rol(a, 5_i32))
             .wrapping_add(b & c | !b & d)
             .wrapping_add(e)
-            .wrapping_add(w[i as usize])
+            .wrapping_add(w[i])
             .wrapping_add(0x5a827999_i32 as u32);
         e = d;
         d = c;
@@ -859,7 +856,7 @@ unsafe fn sha1_single_block(input: *mut u8, output: *mut u8) {
         let tmp_0: u32 = (rol(a, 5_i32))
             .wrapping_add(b ^ c ^ d)
             .wrapping_add(e)
-            .wrapping_add(w[i as usize])
+            .wrapping_add(w[i])
             .wrapping_add(0x6ed9eba1_i32 as u32);
         e = d;
         d = c;
@@ -873,7 +870,7 @@ unsafe fn sha1_single_block(input: *mut u8, output: *mut u8) {
         let tmp_1: u32 = (rol(a, 5_i32))
             .wrapping_add(b & c | b & d | c & d)
             .wrapping_add(e)
-            .wrapping_add(w[i as usize])
+            .wrapping_add(w[i])
             .wrapping_add(0x8f1bbcdc_u32);
         e = d;
         d = c;
@@ -887,7 +884,7 @@ unsafe fn sha1_single_block(input: *mut u8, output: *mut u8) {
         let tmp_2: u32 = (rol(a, 5_i32))
             .wrapping_add(b ^ c ^ d)
             .wrapping_add(e)
-            .wrapping_add(w[i as usize])
+            .wrapping_add(w[i])
             .wrapping_add(0xca62c1d6_u32);
         e = d;
         d = c;
@@ -932,17 +929,13 @@ unsafe fn mix(buffer: *mut u8, buf_size: usize, key: *const u8, key_size: usize)
         for n in &mut sha1_input {
            *n = 0;
         }
-        ptr::copy_nonoverlapping(
-            buffer.offset(half as isize),
-            sha1_input.as_mut_ptr(),
-            half as usize,
-        );
+        ptr::copy_nonoverlapping(buffer.add(half), sha1_input.as_mut_ptr(), half);
         ptr::copy_nonoverlapping(
             key as *const c_void,
-            sha1_input.as_mut_ptr().offset(half as isize) as *mut c_void,
-            key_size as usize,
+            sha1_input.as_mut_ptr().add(half) as *mut c_void,
+            key_size,
         );
-        sha1_input[half.wrapping_add(key_size) as usize] = 0x80_i32 as u8;
+        sha1_input[half.wrapping_add(key_size)] = 0x80_i32 as u8;
         sha1_input
             [(::std::mem::size_of::<[u8; 64]>() as u64).wrapping_sub(1_i32 as u64) as usize] =
             half.wrapping_add(key_size).wrapping_mul(8) as u8;
@@ -954,16 +947,15 @@ unsafe fn mix(buffer: *mut u8, buf_size: usize, key: *const u8, key_size: usize)
         sha1_single_block(sha1_input.as_mut_ptr(), sha1_result.as_mut_ptr());
         let mut i = half & !3;
         while i < half {
-            sha1_result[i as usize] =
-                sha1_result[i.wrapping_add(4).wrapping_sub(half & 3) as usize];
+            sha1_result[i] = sha1_result[i.wrapping_add(4).wrapping_sub(half & 3)];
             i = i.wrapping_add(1);
         }
         i = 0_i32 as usize;
         while i < half {
-            let tmp: u8 = *buffer.offset(i.wrapping_add(half) as isize);
-            *buffer.offset(i.wrapping_add(half) as isize) =
-                (*buffer.offset(i as isize) as i32 ^ sha1_result[i as usize] as i32) as u8;
-            *buffer.offset(i as isize) = tmp;
+            let tmp: u8 = *buffer.add(i.wrapping_add(half));
+            *buffer.add(i.wrapping_add(half)) =
+                (*buffer.add(i) as i32 ^ sha1_result[i] as i32) as u8;
+            *buffer.add(i) = tmp;
             i = i.wrapping_add(1);
         }
         external_counter += 1;
@@ -979,13 +971,9 @@ unsafe fn unmix(buffer: *mut u8, buf_size: usize, key: *const u8, key_size: usiz
         for n in &mut sha1_input {
            *n = 0;
         }
-        ptr::copy_nonoverlapping(buffer, sha1_input.as_mut_ptr(), half as usize);
-        ptr::copy_nonoverlapping(
-            key,
-            sha1_input.as_mut_ptr().offset(half as isize),
-            key_size as usize,
-        );
-        sha1_input[half.wrapping_add(key_size) as usize] = 0x80_i32 as u8;
+        ptr::copy_nonoverlapping(buffer, sha1_input.as_mut_ptr(), half);
+        ptr::copy_nonoverlapping(key, sha1_input.as_mut_ptr().add(half), key_size);
+        sha1_input[half.wrapping_add(key_size)] = 0x80_i32 as u8;
         sha1_input
             [(::std::mem::size_of::<[u8; 64]>() as u64).wrapping_sub(1_i32 as u64) as usize] =
             half.wrapping_add(key_size).wrapping_mul(8) as u8;
@@ -997,16 +985,15 @@ unsafe fn unmix(buffer: *mut u8, buf_size: usize, key: *const u8, key_size: usiz
         sha1_single_block(sha1_input.as_mut_ptr(), sha1_result.as_mut_ptr());
         let mut i = half & !3;
         while i < half {
-            sha1_result[i as usize] =
-                sha1_result[i.wrapping_add(4).wrapping_sub(half & 3) as usize];
+            sha1_result[i] = sha1_result[i.wrapping_add(4).wrapping_sub(half & 3)];
             i = i.wrapping_add(1);
         }
         i = 0_i32 as usize;
         while i < half {
-            let tmp: u8 = *buffer.offset(i as isize);
-            *buffer.offset(i as isize) = (*buffer.offset(i.wrapping_add(half) as isize) as i32
-                ^ sha1_result[i as usize] as i32) as u8;
-            *buffer.offset(i.wrapping_add(half) as isize) = tmp;
+            let tmp: u8 = *buffer.add(i);
+            *buffer.add(i) =
+                (*buffer.add(i.wrapping_add(half)) as i32 ^ sha1_result[i] as i32) as u8;
+            *buffer.add(i.wrapping_add(half)) = tmp;
             i = i.wrapping_add(1);
         }
         external_counter += 1;
@@ -1051,16 +1038,15 @@ pub unsafe fn generate(installation_id_str: *const i8, confirmation_id: *mut i8)
                 let mut carry: u8 = d as u8;
                 let mut i = 0_i32 as usize;
                 while i < installation_id_len {
-                    let x: u32 =
-                        (installation_id[i as usize] as i32 * 10_i32 + carry as i32) as u32;
-                    installation_id[i as usize] = (x & 0xff_i32 as u32) as u8;
+                    let x: u32 = (installation_id[i] as i32 * 10_i32 + carry as i32) as u32;
+                    installation_id[i] = (x & 0xff_i32 as u32) as u8;
                     carry = (x >> 8_i32) as u8;
                     i = i.wrapping_add(1);
                 }
                 if carry != 0 {
                     let fresh1 = installation_id_len;
                     installation_id_len = installation_id_len.wrapping_add(1);
-                    installation_id[fresh1 as usize] = carry;
+                    installation_id[fresh1] = carry;
                 }
             }
         }
@@ -1070,7 +1056,7 @@ pub unsafe fn generate(installation_id_str: *const i8, confirmation_id: *mut i8)
         return 1_i32;
     }
     while installation_id_len < ::std::mem::size_of::<[u8; 19]>() {
-        installation_id[installation_id_len as usize] = 0_i32 as u8;
+        installation_id[installation_id_len] = 0_i32 as u8;
         installation_id_len = installation_id_len.wrapping_add(1);
     }
     const IID_KEY: [u8; 4] = [
@@ -1260,7 +1246,7 @@ pub unsafe fn generate(installation_id_str: *const i8, confirmation_id: *mut i8)
         e.c2rust_unnamed_0.encoded[0_i32 as usize] = ((c3 as u64) << 32_i32
             | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
             .wrapping_div(10_i32 as u64) as u32;
-        decimal[(34 as usize).wrapping_sub(i) as usize] = c4 as u8;
+        decimal[34_usize.wrapping_sub(i)] = c4 as u8;
         i = i.wrapping_add(1);
     }
     let mut q: *mut i8 = confirmation_id;
@@ -1271,7 +1257,7 @@ pub unsafe fn generate(installation_id_str: *const i8, confirmation_id: *mut i8)
             q = q.offset(1);
             *fresh2 = '-' as i32 as i8;
         }
-        let p_0: *mut u8 = decimal.as_mut_ptr().offset(i.wrapping_mul(5) as isize);
+        let p_0: *mut u8 = decimal.as_mut_ptr().add(i.wrapping_mul(5));
         *q.offset(0_i32 as isize) = (*p_0.offset(0_i32 as isize) as i32 + '0' as i32) as i8;
         *q.offset(1_i32 as isize) = (*p_0.offset(1_i32 as isize) as i32 + '0' as i32) as i8;
         *q.offset(2_i32 as isize) = (*p_0.offset(2_i32 as isize) as i32 + '0' as i32) as i8;
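
Note on the pattern applied above: nearly every hunk in this patch is the result of two clippy suggestions, `clippy::ptr_offset_with_cast` (prefer `ptr.add(n)` over `ptr.offset(n as isize)` when the offset is an unsigned `usize`) and `clippy::unnecessary_cast` (drop `i as usize` when `i` is already a `usize`). A minimal standalone sketch of the same before/after transformation follows; the function and variable names here are illustrative only and do not appear in black_box.rs.

// Before: the usize index is cast to isize for `offset`, and re-cast
// (redundantly) for slice indexing. This compiles, but clippy flags both casts.
unsafe fn load_byte_offset(buf: *const u8, w: &mut [u32; 16], i: usize) {
    w[i as usize] = *buf.offset((4 as usize).wrapping_mul(i) as isize) as u32;
}

// After `cargo clippy --fix`: `add` takes the usize directly and the
// redundant casts disappear; behavior is unchanged.
unsafe fn load_byte_add(buf: *const u8, w: &mut [u32; 16], i: usize) {
    w[i] = *buf.add(4_usize.wrapping_mul(i)) as u32;
}

fn main() {
    let data: [u8; 8] = [10, 20, 30, 40, 50, 60, 70, 80];
    let mut w = [0u32; 16];
    unsafe {
        load_byte_offset(data.as_ptr(), &mut w, 1); // reads data[4]
        load_byte_add(data.as_ptr(), &mut w, 1);    // same read, no casts
    }
    assert_eq!(w[1], 50);
}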