Run clippy --fix again
This commit is contained in:
parent 7145e14d29
commit dfbd994d19
1 changed file with 104 additions and 111 deletions
src/confid.rs (215 changed lines)
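This pass drops `mut` from bindings and parameters that are never reassigned; these are the machine-applicable suggestions (most likely the compiler's `unused_mut` diagnostics) that `cargo clippy --fix` applies automatically. A minimal illustration of the pattern, not taken from this file:

// A binding that is never reassigned does not need `mut`; the fix simply drops it.
// fn example(mut x: u64) -> u64 { x.wrapping_add(1) }   // before: "variable does not need to be mutable"
fn example(x: u64) -> u64 {
    x.wrapping_add(1)
}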
@@ -64,7 +64,7 @@ static mut f: [uint64_t; 6] = [
 1_i32 as uint64_t,
 ];
 #[no_mangle]
-unsafe extern "C" fn residue_add(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
+unsafe extern "C" fn residue_add(x: uint64_t, y: uint64_t) -> uint64_t {
 let mut z: uint64_t = x.wrapping_add(y);
 if z >= 0x16a6b036d7f2a79_u64 {
 z = z.wrapping_sub(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
@@ -72,7 +72,7 @@ unsafe extern "C" fn residue_add(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
 z
 }
 #[no_mangle]
-unsafe extern "C" fn residue_sub(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
+unsafe extern "C" fn residue_sub(x: uint64_t, y: uint64_t) -> uint64_t {
 let mut z: uint64_t = x.wrapping_sub(y);
 if x < y {
 z = z.wrapping_add(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
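For context (not part of the diff): residue_add and residue_sub are addition and subtraction modulo the constant 0x16a6b036d7f2a79 used throughout this file. A minimal safe-Rust sketch of the same operations, assuming both inputs are already reduced below the modulus:

const MOD: u64 = 0x16a6b036d7f2a79;

// Addition mod MOD: the sum of two reduced residues exceeds MOD at most once.
fn residue_add_sketch(x: u64, y: u64) -> u64 {
    let z = x.wrapping_add(y);
    if z >= MOD { z - MOD } else { z }
}

// Subtraction mod MOD: borrow the modulus back in when x < y.
fn residue_sub_sketch(x: u64, y: u64) -> u64 {
    if x < y { x.wrapping_sub(y).wrapping_add(MOD) } else { x - y }
}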
@@ -81,20 +81,20 @@ unsafe extern "C" fn residue_sub(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
 }
 #[no_mangle]
 unsafe extern "C" fn __umul128(
-mut multiplier: uint64_t,
-mut multiplicand: uint64_t,
-mut product_hi: *mut uint64_t,
+multiplier: uint64_t,
+multiplicand: uint64_t,
+product_hi: *mut uint64_t,
 ) -> uint64_t {
-let mut a: uint64_t = multiplier >> 32_i32;
-let mut b: uint64_t = multiplier & 0xffffffff_u32 as u64;
-let mut c: uint64_t = multiplicand >> 32_i32;
-let mut d: uint64_t = multiplicand & 0xffffffff_u32 as u64;
-let mut ad: uint64_t = a.wrapping_mul(d);
-let mut bd: uint64_t = b.wrapping_mul(d);
-let mut adbc: uint64_t = ad.wrapping_add(b.wrapping_mul(c));
-let mut adbc_carry: uint64_t = (if adbc < ad { 1_i32 } else { 0_i32 }) as uint64_t;
-let mut product_lo: uint64_t = bd.wrapping_add(adbc << 32_i32);
-let mut product_lo_carry: uint64_t = (if product_lo < bd { 1_i32 } else { 0_i32 }) as uint64_t;
+let a: uint64_t = multiplier >> 32_i32;
+let b: uint64_t = multiplier & 0xffffffff_u32 as u64;
+let c: uint64_t = multiplicand >> 32_i32;
+let d: uint64_t = multiplicand & 0xffffffff_u32 as u64;
+let ad: uint64_t = a.wrapping_mul(d);
+let bd: uint64_t = b.wrapping_mul(d);
+let adbc: uint64_t = ad.wrapping_add(b.wrapping_mul(c));
+let adbc_carry: uint64_t = (if adbc < ad { 1_i32 } else { 0_i32 }) as uint64_t;
+let product_lo: uint64_t = bd.wrapping_add(adbc << 32_i32);
+let product_lo_carry: uint64_t = (if product_lo < bd { 1_i32 } else { 0_i32 }) as uint64_t;
 *product_hi = a
 .wrapping_mul(c)
 .wrapping_add(adbc >> 32_i32)
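Aside (illustrative only, not the commit's code): __umul128 assembles a 64x64-to-128-bit product from 32-bit halves because the file was machine-translated from C. In Rust the same product can be expressed directly with u128; a sketch, assuming the caller wants the (lo, hi) pair:

fn umul128_sketch(multiplier: u64, multiplicand: u64) -> (u64, u64) {
    // Full 128-bit product, then split into the low and high 64-bit halves.
    let product = multiplier as u128 * multiplicand as u128;
    (product as u64, (product >> 64) as u64)
}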
@@ -103,18 +103,18 @@ unsafe extern "C" fn __umul128(
 product_lo
 }
 #[no_mangle]
-unsafe extern "C" fn ui128_quotient_mod(mut lo: uint64_t, mut hi: uint64_t) -> uint64_t {
+unsafe extern "C" fn ui128_quotient_mod(lo: uint64_t, hi: uint64_t) -> uint64_t {
 let mut prod1: uint64_t = 0;
 __umul128(lo, 0x604fa6a1c6346a87_i64 as uint64_t, &mut prod1);
 let mut part1hi: uint64_t = 0;
-let mut part1lo: uint64_t = __umul128(lo, 0x2d351c6d04f8b_i64 as uint64_t, &mut part1hi);
+let part1lo: uint64_t = __umul128(lo, 0x2d351c6d04f8b_i64 as uint64_t, &mut part1hi);
 let mut part2hi: uint64_t = 0;
-let mut part2lo: uint64_t = __umul128(hi, 0x604fa6a1c6346a87_i64 as uint64_t, &mut part2hi);
+let part2lo: uint64_t = __umul128(hi, 0x604fa6a1c6346a87_i64 as uint64_t, &mut part2hi);
 let mut sum1: uint64_t = part1lo.wrapping_add(part2lo);
 let mut sum1carry: u32 = (sum1 < part1lo) as i32 as u32;
 sum1 = sum1.wrapping_add(prod1) as uint64_t as uint64_t;
 sum1carry = sum1carry.wrapping_add((sum1 < prod1) as i32 as u32);
-let mut prod2: uint64_t = part1hi.wrapping_add(part2hi).wrapping_add(sum1carry as u64);
+let prod2: uint64_t = part1hi.wrapping_add(part2hi).wrapping_add(sum1carry as u64);
 let mut prod3hi: uint64_t = 0;
 let mut prod3lo: uint64_t = __umul128(hi, 0x2d351c6d04f8b_i64 as uint64_t, &mut prod3hi);
 prod3lo = prod3lo.wrapping_add(prod2) as uint64_t as uint64_t;
@@ -122,14 +122,14 @@ unsafe extern "C" fn ui128_quotient_mod(mut lo: uint64_t, mut hi: uint64_t) -> u
 prod3lo >> 42_i32 | prod3hi << 22_i32
 }
 #[no_mangle]
-unsafe extern "C" fn residue_mul(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
+unsafe extern "C" fn residue_mul(x: uint64_t, y: uint64_t) -> uint64_t {
 let mut hi: uint64_t = 0;
-let mut lo: uint64_t = __umul128(x, y, &mut hi);
-let mut quotient: uint64_t = ui128_quotient_mod(lo, hi);
+let lo: uint64_t = __umul128(x, y, &mut hi);
+let quotient: uint64_t = ui128_quotient_mod(lo, hi);
 lo.wrapping_sub(quotient.wrapping_mul(0x16a6b036d7f2a79_u64)) as uint64_t
 }
 #[no_mangle]
-unsafe extern "C" fn residue_pow(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
+unsafe extern "C" fn residue_pow(x: uint64_t, mut y: uint64_t) -> uint64_t {
 if y == 0_i32 as u64 {
 return 1_i32 as uint64_t;
 }
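Another aside: residue_mul multiplies two residues and reduces the 128-bit product modulo 0x16a6b036d7f2a79; ui128_quotient_mod appears to estimate the quotient from precomputed constants so that no 128-bit division is needed. A plain u128 equivalent for reference (a sketch, not the code in this commit):

fn residue_mul_sketch(x: u64, y: u64) -> u64 {
    const MOD: u64 = 0x16a6b036d7f2a79;
    // The full product fits in u128, so the reduction can be a single modulo.
    ((x as u128 * y as u128) % MOD as u128) as u64
}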
@@ -156,10 +156,10 @@ unsafe extern "C" fn inverse(mut u: uint64_t, mut v: uint64_t) -> uint64_t {
 let mut tmp: int64_t = 0;
 let mut xu: int64_t = 1_i32 as int64_t;
 let mut xv: int64_t = 0_i32 as int64_t;
-let mut v0: uint64_t = v;
+let v0: uint64_t = v;
 while u > 1_i32 as u64 {
-let mut d: uint64_t = v.wrapping_div(u);
-let mut remainder: uint64_t = v.wrapping_rem(u);
+let d: uint64_t = v.wrapping_div(u);
+let remainder: uint64_t = v.wrapping_rem(u);
 tmp = u as int64_t;
 u = remainder;
 v = tmp as uint64_t;
@@ -172,15 +172,15 @@ unsafe extern "C" fn inverse(mut u: uint64_t, mut v: uint64_t) -> uint64_t {
 xu as uint64_t
 }
 #[no_mangle]
-unsafe extern "C" fn residue_inv(mut x: uint64_t) -> uint64_t {
+unsafe extern "C" fn residue_inv(x: uint64_t) -> uint64_t {
 inverse(x, 0x16a6b036d7f2a79_u64 as uint64_t)
 }
 #[no_mangle]
-unsafe extern "C" fn residue_sqrt(mut what: uint64_t) -> uint64_t {
+unsafe extern "C" fn residue_sqrt(what: uint64_t) -> uint64_t {
 if what == 0 {
 return 0_i32 as uint64_t;
 }
-let mut g: uint64_t = 43_i32 as uint64_t;
+let g: uint64_t = 43_i32 as uint64_t;
 let mut z: uint64_t = 0;
 let mut y: uint64_t = 0;
 let mut r: uint64_t = 0;
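For reference, `inverse` is the extended Euclidean algorithm specialized to modular inversion: xu and xv track the Bezout coefficient of u while u and v are reduced, and residue_inv applies it with the modulus above. A compact signed-arithmetic sketch of the same idea, assuming gcd(a, m) = 1 and 0 < a < m (illustrative, not the translated code):

fn mod_inverse_sketch(a: u64, m: u64) -> u64 {
    // Extended Euclid on (a, m); xu is the running coefficient of a.
    let (mut u, mut v) = (a as i128, m as i128);
    let (mut xu, mut xv) = (1i128, 0i128);
    while u > 1 {
        let q = v / u;
        let r = v % u;
        let x = xv - q * xu;
        v = u;
        u = r;
        xv = xu;
        xu = x;
    }
    // Normalize the coefficient into the range 0..m.
    xu.rem_euclid(m as i128) as u64
}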
@@ -267,7 +267,7 @@ unsafe extern "C" fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
 }
 return 0_i32;
 }
-let mut sqr: uint64_t = residue_mul(
+let sqr: uint64_t = residue_mul(
 residue_mul(f1, f1),
 residue_inv(residue_add(coeff1, coeff1)),
 );
@@ -285,7 +285,7 @@ unsafe extern "C" fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
 return 0_i32;
 }
 d_0 = residue_add(d_0, d_0);
-let mut inv: uint64_t = residue_inv(coeff2);
+let inv: uint64_t = residue_inv(coeff2);
 let mut root: uint64_t = residue_mul(residue_add(coeff1, d_0), inv);
 v1 = residue_sqrt(root);
 if v1 == 0xffffffffffffffff_u64 {
@@ -296,7 +296,7 @@ unsafe extern "C" fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
 }
 }
 }
-let mut v0: uint64_t = residue_mul(
+let v0: uint64_t = residue_mul(
 residue_add(f1, residue_mul(u1, residue_mul(v1, v1))),
 residue_inv(residue_add(v1, v1)),
 );
@@ -306,12 +306,12 @@ unsafe extern "C" fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
 }
 #[no_mangle]
 unsafe extern "C" fn polynomial_mul(
-mut adeg: i32,
-mut a: *const uint64_t,
-mut bdeg: i32,
-mut b: *const uint64_t,
+adeg: i32,
+a: *const uint64_t,
+bdeg: i32,
+b: *const uint64_t,
 mut resultprevdeg: i32,
-mut result: *mut uint64_t,
+result: *mut uint64_t,
 ) -> i32 {
 if adeg < 0_i32 || bdeg < 0_i32 {
 return resultprevdeg;
@@ -343,17 +343,17 @@ unsafe extern "C" fn polynomial_mul(
 }
 #[no_mangle]
 unsafe extern "C" fn polynomial_div_monic(
-mut adeg: i32,
-mut a: *mut uint64_t,
-mut bdeg: i32,
-mut b: *const uint64_t,
-mut quotient: *mut uint64_t,
+adeg: i32,
+a: *mut uint64_t,
+bdeg: i32,
+b: *const uint64_t,
+quotient: *mut uint64_t,
 ) -> i32 {
 let mut i: i32 = 0;
 let mut j: i32 = 0;
 i = adeg - bdeg;
 while i >= 0_i32 {
-let mut q: uint64_t = *a.offset((i + bdeg) as isize);
+let q: uint64_t = *a.offset((i + bdeg) as isize);
 if !quotient.is_null() {
 *quotient.offset(i as isize) = q;
 }
@@ -376,16 +376,16 @@ unsafe extern "C" fn polynomial_div_monic(
 }
 #[no_mangle]
 unsafe extern "C" fn polynomial_xgcd(
-mut adeg: i32,
-mut a: *const uint64_t,
-mut bdeg: i32,
-mut b: *const uint64_t,
-mut pgcddeg: *mut i32,
-mut gcd: *mut uint64_t,
-mut pmult1deg: *mut i32,
-mut mult1: *mut uint64_t,
-mut pmult2deg: *mut i32,
-mut mult2: *mut uint64_t,
+adeg: i32,
+a: *const uint64_t,
+bdeg: i32,
+b: *const uint64_t,
+pgcddeg: *mut i32,
+gcd: *mut uint64_t,
+pmult1deg: *mut i32,
+mult1: *mut uint64_t,
+pmult2deg: *mut i32,
+mult2: *mut uint64_t,
 ) {
 let mut sdeg: i32 = -1_i32;
 let mut s: [uint64_t; 3] = [0_i32 as uint64_t, 0_i32 as uint64_t, 0_i32 as uint64_t];
@@ -451,8 +451,8 @@ unsafe extern "C" fn polynomial_xgcd(
 t[2_i32 as usize] = *mult2.offset(2_i32 as isize);
 *mult2.offset(2_i32 as isize) = tmp2;
 } else {
-let mut delta: i32 = gcddeg - rdeg;
-let mut mult: uint64_t =
+let delta: i32 = gcddeg - rdeg;
+let mult: uint64_t =
 residue_mul(*gcd.offset(gcddeg as isize), residue_inv(r[rdeg as usize]));
 let mut i: i32 = 0_i32;
 while i <= rdeg {
@@ -501,9 +501,9 @@ unsafe extern "C" fn polynomial_xgcd(
 }
 #[no_mangle]
 unsafe extern "C" fn u2poly(
-mut src: *const TDivisor,
-mut polyu: *mut uint64_t,
-mut polyv: *mut uint64_t,
+src: *const TDivisor,
+polyu: *mut uint64_t,
+polyv: *mut uint64_t,
 ) -> i32 {
 if (*src).u[1_i32 as usize] as u64 != 0xffffffffffffffff_u64 {
 *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as uint64_t;
@@ -527,16 +527,16 @@ unsafe extern "C" fn u2poly(
 }
 #[no_mangle]
 unsafe extern "C" fn divisor_add(
-mut src1: *const TDivisor,
-mut src2: *const TDivisor,
+src1: *const TDivisor,
+src2: *const TDivisor,
 mut dst: *mut TDivisor,
 ) {
 let mut u1: [uint64_t; 3] = [0; 3];
 let mut u2: [uint64_t; 3] = [0; 3];
 let mut v1: [uint64_t; 2] = [0; 2];
 let mut v2: [uint64_t; 2] = [0; 2];
-let mut u1deg: i32 = u2poly(src1, u1.as_mut_ptr(), v1.as_mut_ptr());
-let mut u2deg: i32 = u2poly(src2, u2.as_mut_ptr(), v2.as_mut_ptr());
+let u1deg: i32 = u2poly(src1, u1.as_mut_ptr(), v1.as_mut_ptr());
+let u2deg: i32 = u2poly(src2, u2.as_mut_ptr(), v2.as_mut_ptr());
 let mut d1deg: i32 = 0;
 let mut e1deg: i32 = 0;
 let mut e2deg: i32 = 0;
@@ -560,7 +560,7 @@ unsafe extern "C" fn divisor_add(
 residue_add(v1[1_i32 as usize], v2[1_i32 as usize]),
 0_i32 as uint64_t,
 ];
-let mut bdeg: i32 = if b[1_i32 as usize] == 0_i32 as u64 {
+let bdeg: i32 = if b[1_i32 as usize] == 0_i32 as u64 {
 if b[0_i32 as usize] == 0_i32 as u64 {
 -1_i32
 } else {
@@ -587,7 +587,7 @@ unsafe extern "C" fn divisor_add(
 &mut c2deg,
 c2.as_mut_ptr(),
 );
-let mut dmult: uint64_t = residue_inv(d[ddeg as usize]);
+let dmult: uint64_t = residue_inv(d[ddeg as usize]);
 let mut i: i32 = 0;
 i = 0_i32;
 while i < ddeg {
@@ -744,7 +744,7 @@ unsafe extern "C" fn divisor_add(
 udiv_0.as_mut_ptr(),
 );
 udeg = tmpdeg - udeg;
-let mut mult: uint64_t = residue_inv(udiv_0[udeg as usize]);
+let mult: uint64_t = residue_inv(udiv_0[udeg as usize]);
 i = 0_i32;
 while i < udeg {
 u[i as usize] = residue_mul(udiv_0[i as usize], mult);
@@ -794,11 +794,7 @@ unsafe extern "C" fn divisor_add(
 };
 }
 #[no_mangle]
-unsafe extern "C" fn divisor_mul(
-mut src: *const TDivisor,
-mut mult: uint64_t,
-mut dst: *mut TDivisor,
-) {
+unsafe extern "C" fn divisor_mul(src: *const TDivisor, mut mult: uint64_t, mut dst: *mut TDivisor) {
 if mult == 0_i32 as u64 {
 (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
 (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
@@ -825,7 +821,7 @@ unsafe extern "C" fn divisor_mul(
 }
 #[no_mangle]
 unsafe extern "C" fn divisor_mul128(
-mut src: *const TDivisor,
+src: *const TDivisor,
 mut mult_lo: uint64_t,
 mut mult_hi: uint64_t,
 mut dst: *mut TDivisor,
@@ -863,14 +859,11 @@ unsafe extern "C" fn divisor_mul128(
 }
 }
 #[no_mangle]
-unsafe extern "C" fn rol(mut x: u32, mut shift: i32) -> u32 {
+unsafe extern "C" fn rol(x: u32, shift: i32) -> u32 {
 x << shift | x >> (32_i32 - shift)
 }
 #[no_mangle]
-unsafe extern "C" fn sha1_single_block(
-mut input: *mut libc::c_uchar,
-mut output: *mut libc::c_uchar,
-) {
+unsafe extern "C" fn sha1_single_block(input: *mut libc::c_uchar, output: *mut libc::c_uchar) {
 let mut a: u32 = 0;
 let mut b: u32 = 0;
 let mut c: u32 = 0;
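Side note: `rol` is a plain 32-bit left rotate. The standard library already provides the same operation; a one-line sketch of an equivalent for 0 < shift < 32, which is the only range where the shifted expression above is well defined:

fn rol_sketch(x: u32, shift: u32) -> u32 {
    // u32::rotate_left wraps the shift amount, matching x << s | x >> (32 - s).
    x.rotate_left(shift)
}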
@@ -909,7 +902,7 @@ unsafe extern "C" fn sha1_single_block(
 }
 i = 0_i32 as size_t;
 while i < 20_i32 as u64 {
-let mut tmp: u32 = (rol(a, 5_i32))
+let tmp: u32 = (rol(a, 5_i32))
 .wrapping_add(b & c | !b & d)
 .wrapping_add(e)
 .wrapping_add(w[i as usize])
@@ -923,7 +916,7 @@ unsafe extern "C" fn sha1_single_block(
 }
 i = 20_i32 as size_t;
 while i < 40_i32 as u64 {
-let mut tmp_0: u32 = (rol(a, 5_i32))
+let tmp_0: u32 = (rol(a, 5_i32))
 .wrapping_add(b ^ c ^ d)
 .wrapping_add(e)
 .wrapping_add(w[i as usize])
@@ -937,7 +930,7 @@ unsafe extern "C" fn sha1_single_block(
 }
 i = 40_i32 as size_t;
 while i < 60_i32 as u64 {
-let mut tmp_1: u32 = (rol(a, 5_i32))
+let tmp_1: u32 = (rol(a, 5_i32))
 .wrapping_add(b & c | b & d | c & d)
 .wrapping_add(e)
 .wrapping_add(w[i as usize])
@@ -951,7 +944,7 @@ unsafe extern "C" fn sha1_single_block(
 }
 i = 60_i32 as size_t;
 while i < 80_i32 as u64 {
-let mut tmp_2: u32 = (rol(a, 5_i32))
+let tmp_2: u32 = (rol(a, 5_i32))
 .wrapping_add(b ^ c ^ d)
 .wrapping_add(e)
 .wrapping_add(w[i as usize])
@@ -991,14 +984,14 @@ unsafe extern "C" fn sha1_single_block(
 }
 #[no_mangle]
 unsafe extern "C" fn Mix(
-mut buffer: *mut libc::c_uchar,
-mut bufSize: size_t,
-mut key: *const libc::c_uchar,
-mut keySize: size_t,
+buffer: *mut libc::c_uchar,
+bufSize: size_t,
+key: *const libc::c_uchar,
+keySize: size_t,
 ) {
 let mut sha1_input: [libc::c_uchar; 64] = [0; 64];
 let mut sha1_result: [libc::c_uchar; 20] = [0; 20];
-let mut half: size_t = bufSize.wrapping_div(2_i32 as u64);
+let half: size_t = bufSize.wrapping_div(2_i32 as u64);
 let mut external_counter: i32 = 0;
 external_counter = 0_i32;
 while external_counter < 4_i32 {
@@ -1037,7 +1030,7 @@ unsafe extern "C" fn Mix(
 }
 i = 0_i32 as size_t;
 while i < half {
-let mut tmp: libc::c_uchar = *buffer.offset(i.wrapping_add(half) as isize);
+let tmp: libc::c_uchar = *buffer.offset(i.wrapping_add(half) as isize);
 *buffer.offset(i.wrapping_add(half) as isize) = (*buffer.offset(i as isize) as i32
 ^ sha1_result[i as usize] as i32)
 as libc::c_uchar;
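The swap in this loop is the heart of what looks like a Feistel-style construction: a SHA-1-derived keystream is XORed into one half of the buffer while the halves are exchanged, and Unmix below runs the rounds in the opposite direction. A hypothetical sketch of one such round with the hash abstracted into a closure (the names, the closure, and the equal-length halves are assumptions for illustration, not this file's API):

fn feistel_round_sketch(left: &mut [u8], right: &mut [u8], keystream: impl Fn(&[u8]) -> Vec<u8>) {
    // The keystream is derived from the right half (plus key material in the real code).
    let ks = keystream(right);
    for i in 0..left.len() {
        let tmp = right[i];
        right[i] = left[i] ^ ks[i];
        left[i] = tmp;
    }
}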
@@ -1049,14 +1042,14 @@ unsafe extern "C" fn Mix(
 }
 #[no_mangle]
 unsafe extern "C" fn Unmix(
-mut buffer: *mut libc::c_uchar,
-mut bufSize: size_t,
-mut key: *const libc::c_uchar,
-mut keySize: size_t,
+buffer: *mut libc::c_uchar,
+bufSize: size_t,
+key: *const libc::c_uchar,
+keySize: size_t,
 ) {
 let mut sha1_input: [libc::c_uchar; 64] = [0; 64];
 let mut sha1_result: [libc::c_uchar; 20] = [0; 20];
-let mut half: size_t = bufSize.wrapping_div(2_i32 as u64);
+let half: size_t = bufSize.wrapping_div(2_i32 as u64);
 let mut external_counter: i32 = 0;
 external_counter = 0_i32;
 while external_counter < 4_i32 {
@@ -1095,7 +1088,7 @@ unsafe extern "C" fn Unmix(
 }
 i = 0_i32 as size_t;
 while i < half {
-let mut tmp: libc::c_uchar = *buffer.offset(i as isize);
+let tmp: libc::c_uchar = *buffer.offset(i as isize);
 *buffer.offset(i as isize) = (*buffer.offset(i.wrapping_add(half) as isize) as i32
 ^ sha1_result[i as usize] as i32)
 as libc::c_uchar;
@@ -1107,8 +1100,8 @@ unsafe extern "C" fn Unmix(
 }
 #[no_mangle]
 unsafe extern "C" fn Generate(
-mut installation_id_str: *const libc::c_char,
-mut confirmation_id: *mut libc::c_char,
+installation_id_str: *const libc::c_char,
+confirmation_id: *mut libc::c_char,
 ) -> i32 {
 let mut installation_id: [libc::c_uchar; 19] = [0; 19];
 let mut installation_id_len: size_t = 0_i32 as size_t;
@@ -1119,7 +1112,7 @@ unsafe extern "C" fn Generate(
 let mut i: size_t = 0;
 while *p != 0 {
 if !(*p as i32 == ' ' as i32 || *p as i32 == '-' as i32) {
-let mut d: i32 = *p as i32 - '0' as i32;
+let d: i32 = *p as i32 - '0' as i32;
 if !(0_i32..=9_i32).contains(&d) {
 return 3_i32;
 }
@@ -1152,7 +1145,7 @@ unsafe extern "C" fn Generate(
 let mut carry: libc::c_uchar = d as libc::c_uchar;
 i = 0_i32 as size_t;
 while i < installation_id_len {
-let mut x: u32 =
+let x: u32 =
 (installation_id[i as usize] as i32 * 10_i32 + carry as i32) as u32;
 installation_id[i as usize] = (x & 0xff_i32 as u32) as libc::c_uchar;
 carry = (x >> 8_i32) as libc::c_uchar;
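The loop here treats installation_id as a little-endian big integer and folds in each decimal digit by multiplying by 10 and propagating a carry byte by byte. A stand-alone sketch of that step (hypothetical helper, not the function in this file):

fn push_digit(num: &mut [u8], digit: u8) {
    // num is a little-endian big integer: num = num * 10 + digit.
    let mut carry = digit as u32;
    for byte in num.iter_mut() {
        let x = *byte as u32 * 10 + carry;
        *byte = (x & 0xff) as u8;
        carry = x >> 8;
    }
}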
@@ -1204,13 +1197,13 @@ unsafe extern "C" fn Generate(
 installation_id.as_mut_ptr() as *const libc::c_void,
 ::std::mem::size_of::<C2RustUnnamed_4>() as u64,
 );
-let mut productId1: u32 = (parsed.ProductIDLow & ((1_i32 << 17_i32) - 1_i32) as u64) as u32;
-let mut productId2: u32 =
+let productId1: u32 = (parsed.ProductIDLow & ((1_i32 << 17_i32) - 1_i32) as u64) as u32;
+let productId2: u32 =
 (parsed.ProductIDLow >> 17_i32 & ((1_i32 << 10_i32) - 1_i32) as u64) as u32;
-let mut productId3: u32 =
+let productId3: u32 =
 (parsed.ProductIDLow >> 27_i32 & ((1_i32 << 25_i32) - 1_i32) as u64) as u32;
-let mut version: u32 = (parsed.ProductIDLow >> 52_i32 & 7_i32 as u64) as u32;
-let mut productId4: u32 =
+let version: u32 = (parsed.ProductIDLow >> 52_i32 & 7_i32 as u64) as u32;
+let productId4: u32 =
 (parsed.ProductIDLow >> 55_i32 | ((parsed.ProductIDHigh as i32) << 9_i32) as u64) as u32;
 if version
 != (if totalCount == 41_i32 as u64 {
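For orientation, the masks and shifts above slice ProductIDLow into bit fields: 17 + 10 + 25 bits of product ID, a 3-bit version at bit 52, and the remaining bits combined with ProductIDHigh. A hypothetical helper mirroring the same unpacking (field widths taken from the masks in the diff; the u8 type for the high byte is an assumption):

fn unpack_product_id(low: u64, high: u8) -> (u32, u32, u32, u32, u32) {
    let product_id1 = (low & ((1 << 17) - 1)) as u32;            // bits 0..17
    let product_id2 = ((low >> 17) & ((1 << 10) - 1)) as u32;    // bits 17..27
    let product_id3 = ((low >> 27) & ((1 << 25) - 1)) as u32;    // bits 27..52
    let version = ((low >> 52) & 7) as u32;                      // bits 52..55
    let product_id4 = ((low >> 55) | ((high as u64) << 9)) as u32;
    (product_id1, product_id2, product_id3, version, product_id4)
}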
@@ -1255,7 +1248,7 @@ unsafe extern "C" fn Generate(
 16_i32 as size_t,
 );
 let mut x2: uint64_t = ui128_quotient_mod(u.c2rust_unnamed.lo, u.c2rust_unnamed.hi);
-let mut x1: uint64_t =
+let x1: uint64_t =
 u.c2rust_unnamed
 .lo
 .wrapping_sub(x2.wrapping_mul(0x16a6b036d7f2a79_u64)) as uint64_t;
@@ -1307,13 +1300,13 @@ unsafe extern "C" fn Generate(
 .wrapping_add((e.c2rust_unnamed.encoded_lo < 0x16a6b036d7f2a79_u64) as i32 as u64)
 as uint64_t as uint64_t;
 } else {
-let mut x1_0: uint64_t = (if d_0.u[1_i32 as usize] as i32 % 2_i32 != 0 {
+let x1_0: uint64_t = (if d_0.u[1_i32 as usize] as i32 % 2_i32 != 0 {
 (d_0.u[1_i32 as usize] as u64).wrapping_add(0x16a6b036d7f2a79_u64)
 } else {
 d_0.u[1_i32 as usize] as u64
 })
 .wrapping_div(2_i32 as u64) as uint64_t;
-let mut x2sqr: uint64_t =
+let x2sqr: uint64_t =
 residue_sub(residue_mul(x1_0, x1_0), d_0.u[0_i32 as usize] as uint64_t);
 let mut x2_0: uint64_t = residue_sqrt(x2sqr);
 if x2_0 == 0xffffffffffffffff_u64 {
|
@ -1332,12 +1325,12 @@ unsafe extern "C" fn Generate(
|
|||
as uint64_t as uint64_t;
|
||||
} else {
|
||||
let mut x1a: uint64_t = residue_sub(x1_0, x2_0);
|
||||
let mut y1: uint64_t = residue_sub(
|
||||
let y1: uint64_t = residue_sub(
|
||||
d_0.v[0_i32 as usize] as uint64_t,
|
||||
residue_mul(d_0.v[1_i32 as usize] as uint64_t, x1a),
|
||||
);
|
||||
let mut x2a: uint64_t = residue_add(x1_0, x2_0);
|
||||
let mut y2: uint64_t = residue_sub(
|
||||
let y2: uint64_t = residue_sub(
|
||||
d_0.v[0_i32 as usize] as uint64_t,
|
||||
residue_mul(d_0.v[1_i32 as usize] as uint64_t, x2a),
|
||||
);
|
||||
|
@@ -1364,23 +1357,23 @@ unsafe extern "C" fn Generate(
 let mut decimal: [libc::c_uchar; 35] = [0; 35];
 i = 0_i32 as size_t;
 while i < 35_i32 as u64 {
-let mut c: u32 = (e.c2rust_unnamed_0.encoded[3_i32 as usize]).wrapping_rem(10_i32 as u32);
+let c: u32 = (e.c2rust_unnamed_0.encoded[3_i32 as usize]).wrapping_rem(10_i32 as u32);
 e.c2rust_unnamed_0.encoded[3_i32 as usize] = e.c2rust_unnamed_0.encoded[3_i32 as usize]
 .wrapping_div(10_i32 as u32)
 as uint32_t as uint32_t;
-let mut c2: u32 = ((c as uint64_t) << 32_i32
+let c2: u32 = ((c as uint64_t) << 32_i32
 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
 .wrapping_rem(10_i32 as u64) as u32;
 e.c2rust_unnamed_0.encoded[2_i32 as usize] =
 ((c as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
 .wrapping_div(10_i32 as u64) as uint32_t;
-let mut c3: u32 = ((c2 as uint64_t) << 32_i32
+let c3: u32 = ((c2 as uint64_t) << 32_i32
 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
 .wrapping_rem(10_i32 as u64) as u32;
 e.c2rust_unnamed_0.encoded[1_i32 as usize] =
 ((c2 as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
 .wrapping_div(10_i32 as u64) as uint32_t;
-let mut c4: u32 = ((c3 as uint64_t) << 32_i32
+let c4: u32 = ((c3 as uint64_t) << 32_i32
 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
 .wrapping_rem(10_i32 as u64) as u32;
 e.c2rust_unnamed_0.encoded[0_i32 as usize] =
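The chain of c, c2, c3, c4 above is long division of a 128-bit value, stored as four u32 limbs, by 10, emitting one decimal digit per pass. A compact sketch of the same limb-by-limb division (hypothetical helper, not the translated code):

fn div10(encoded: &mut [u32; 4]) -> u32 {
    // encoded is little-endian; process limbs from most to least significant.
    let mut rem: u64 = 0;
    for limb in encoded.iter_mut().rev() {
        let cur = rem << 32 | *limb as u64;
        *limb = (cur / 10) as u32;
        rem = cur % 10;
    }
    // The final remainder is the next (least significant) decimal digit.
    rem as u32
}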
@@ -1397,7 +1390,7 @@ unsafe extern "C" fn Generate(
 q = q.offset(1);
 *fresh2 = '-' as i32 as libc::c_char;
 }
-let mut p_0: *mut libc::c_uchar = decimal
+let p_0: *mut libc::c_uchar = decimal
 .as_mut_ptr()
 .offset(i.wrapping_mul(5_i32 as u64) as isize);
 *q.offset(0_i32 as isize) =