From ecce5e797f6a5e0099ab912757613ffe84430683 Mon Sep 17 00:00:00 2001
From: Alex Page
Date: Thu, 22 Jun 2023 01:17:52 -0400
Subject: [PATCH] Remove C-style types

---
 src/confid.rs | 496 ++++++++++++++++++++++++--------------------------
 1 file changed, 239 insertions(+), 257 deletions(-)

diff --git a/src/confid.rs b/src/confid.rs
index 1bdbca3..833eab9 100644
--- a/src/confid.rs
+++ b/src/confid.rs
@@ -13,20 +13,16 @@ use std::{
 use thiserror::Error;
 
 type size_t = u64;
-type int64_t = i64;
-type uint16_t = u16;
-type uint32_t = u32;
-type uint64_t = u64;
 #[derive(Copy, Clone)]
 #[repr(C)]
 struct TDivisor {
-    u: [uint16_t; 2],
-    v: [uint16_t; 2],
+    u: [u16; 2],
+    v: [u16; 2],
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
 struct C2RustUnnamed {
-    encoded: [uint32_t; 4],
+    encoded: [u32; 4],
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
@@ -37,14 +33,14 @@ union C2RustUnnamed_0 {
 #[derive(Copy, Clone)]
 #[repr(C)]
 struct C2RustUnnamed_1 {
-    encoded_lo: uint64_t,
-    encoded_hi: uint64_t,
+    encoded_lo: u64,
+    encoded_hi: u64,
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
 struct C2RustUnnamed_2 {
-    lo: uint64_t,
-    hi: uint64_t,
+    lo: u64,
+    hi: u64,
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
@@ -55,34 +51,37 @@ union C2RustUnnamed_3 {
 #[derive(Copy, Clone)]
 #[repr(C, packed)]
 struct C2RustUnnamed_4 {
-    HardwareID: uint64_t,
-    ProductIDLow: uint64_t,
+    HardwareID: u64,
+    ProductIDLow: u64,
     ProductIDHigh: u8,
     KeySHA1: u16,
 }
-static mut f: [uint64_t; 6] = [
-    0_i32 as uint64_t,
-    0x21840136c85381_u64 as uint64_t,
-    0x44197b83892ad0_u64 as uint64_t,
-    0x1400606322b3b04_u64 as uint64_t,
-    0x1400606322b3b04_u64 as uint64_t,
-    1_i32 as uint64_t,
+static mut f: [u64; 6] = [
+    0,
+    0x21840136c85381,
+    0x44197b83892ad0,
+    0x1400606322b3b04,
+    0x1400606322b3b04,
+    1,
 ];
 
-fn residue_add(x: uint64_t, y: uint64_t) -> uint64_t {
-    let mut z: uint64_t = x.wrapping_add(y);
+const MOD: u64 = 0x16A6B036D7F2A79;
+const BAD: u64 = 0xffffffffffffffff;
+
+fn residue_add(x: u64, y: u64) -> u64 {
+    let mut z: u64 = x.wrapping_add(y);
     //z = z - (z >= MOD ? MOD : 0);
-    if z >= 0x16a6b036d7f2a79_u64 {
-        z = z.wrapping_sub(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
+    if z >= MOD {
+        z = z.wrapping_sub(MOD);
     }
     z
 }
 
-fn residue_sub(x: uint64_t, y: uint64_t) -> uint64_t {
-    let mut z: uint64_t = x.wrapping_sub(y);
+fn residue_sub(x: u64, y: u64) -> u64 {
+    let mut z: u64 = x.wrapping_sub(y);
     //z += (x < y ? MOD : 0);
     if x < y {
-        z = z.wrapping_add(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
+        z = z.wrapping_add(MOD);
     }
     z
 }
@@ -116,24 +115,24 @@ fn ui128_quotient_mod(lo: u64, hi: u64) -> u64 {
     prod3lo >> 42_i32 | prod3hi << 22_i32
 }
 
-fn residue_mul(x: uint64_t, y: uint64_t) -> uint64_t {
+fn residue_mul(x: u64, y: u64) -> u64 {
     // * ceil(2**170/MOD) = 0x2d351 c6d04f8b|604fa6a1 c6346a87 for (p-1)*(p-1) max
-    let mut hi: uint64_t = 0;
-    let lo: uint64_t = umul128(x, y, &mut hi);
-    let quotient: uint64_t = ui128_quotient_mod(lo, hi);
-    lo.wrapping_sub(quotient.wrapping_mul(0x16a6b036d7f2a79_u64)) as uint64_t
+    let mut hi: u64 = 0;
+    let lo: u64 = umul128(x, y, &mut hi);
+    let quotient: u64 = ui128_quotient_mod(lo, hi);
+    lo.wrapping_sub(quotient.wrapping_mul(MOD))
 }
 
-fn residue_pow(x: uint64_t, mut y: uint64_t) -> uint64_t {
+fn residue_pow(x: u64, mut y: u64) -> u64 {
     if y == 0_i32 as u64 {
-        return 1_i32 as uint64_t;
+        return 1_i32 as u64;
     }
-    let mut cur: uint64_t = x;
+    let mut cur: u64 = x;
     while y & 1_i32 as u64 == 0 {
         cur = residue_mul(cur, cur);
         y >>= 1_i32;
     }
-    let mut res: uint64_t = cur;
+    let mut res: u64 = cur;
     loop {
         y >>= 1_i32;
         if y == 0_i32 as u64 {
@@ -147,37 +146,36 @@ fn residue_pow(x: uint64_t, mut y: uint64_t) -> uint64_t {
     res
 }
 
-fn inverse(mut u: uint64_t, mut v: uint64_t) -> uint64_t {
+fn inverse(mut u: u64, mut v: u64) -> u64 {
     let mut tmp;
-    let mut xu: int64_t = 1_i32 as int64_t;
-    let mut xv: int64_t = 0_i32 as int64_t;
-    let v0: uint64_t = v;
+    let mut xu: i64 = 1_i32 as i64;
+    let mut xv: i64 = 0_i32 as i64;
+    let v0: u64 = v;
     while u > 1_i32 as u64 {
-        let d: uint64_t = v.wrapping_div(u);
-        let remainder: uint64_t = v.wrapping_rem(u);
-        tmp = u as int64_t;
+        let d: u64 = v.wrapping_div(u);
+        let remainder: u64 = v.wrapping_rem(u);
+        tmp = u as i64;
         u = remainder;
-        v = tmp as uint64_t;
+        v = tmp as u64;
         tmp = xu;
-        xu = (xv as u64).wrapping_sub(d.wrapping_mul(xu as u64)) as int64_t;
+        xu = (xv as u64).wrapping_sub(d.wrapping_mul(xu as u64)) as i64;
         xv = tmp;
     }
-    xu = (xu as u64).wrapping_add(if xu < 0_i32 as i64 { v0 } else { 0_i32 as u64 }) as int64_t
-        as int64_t;
-    xu as uint64_t
+    xu = (xu as u64).wrapping_add(if xu < 0_i32 as i64 { v0 } else { 0_i32 as u64 }) as i64;
+    xu as u64
 }
 
-fn residue_inv(x: uint64_t) -> uint64_t {
-    inverse(x, 0x16a6b036d7f2a79_u64 as uint64_t)
+fn residue_inv(x: u64) -> u64 {
+    inverse(x, MOD)
 }
 
-fn residue_sqrt(what: uint64_t) -> uint64_t {
+fn residue_sqrt(what: u64) -> u64 {
     if what == 0 {
-        return 0_i32 as uint64_t;
+        return 0_i32 as u64;
     }
-    let g: uint64_t = 43_i32 as uint64_t;
-    let mut e: uint64_t = 0_i32 as uint64_t;
-    let mut q: uint64_t = 0x16a6b036d7f2a79_u64.wrapping_sub(1_i32 as u64) as uint64_t;
+    let g: u64 = 43_i32 as u64;
+    let mut e: u64 = 0_i32 as u64;
+    let mut q: u64 = MOD.wrapping_sub(1_i32 as u64);
     while q & 1_i32 as u64 == 0 {
         e = e.wrapping_add(1);
         q >>= 1_i32;
@@ -192,8 +190,8 @@ fn residue_sqrt(what: uint64_t) -> uint64_t {
     let mut b = residue_mul(residue_mul(what, x), x);
     x = residue_mul(what, x);
     while b != 1_i32 as u64 {
-        let mut m: uint64_t = 0_i32 as uint64_t;
-        let mut b2: uint64_t = b;
+        let mut m: u64 = 0_i32 as u64;
+        let mut b2: u64 = b;
         loop {
             m = m.wrapping_add(1);
             b2 = residue_mul(b2, b2);
@@ -202,11 +200,11 @@ fn residue_sqrt(what: uint64_t) -> uint64_t {
             }
         }
        if m == r {
-            return 0xffffffffffffffff_u64 as uint64_t;
+            return BAD;
        }
        let t = residue_pow(
            y,
-            (1_i32 << r.wrapping_sub(m).wrapping_sub(1_i32 as u64)) as uint64_t,
+            (1_i32 << r.wrapping_sub(m).wrapping_sub(1_i32 as u64)) as u64,
        );
        y = residue_mul(t, t);
        r = m;
@@ -214,7 +212,7 @@ fn residue_sqrt(what: uint64_t) -> uint64_t {
         b = residue_mul(b, y);
     }
     if residue_mul(x, x) != what {
-        return 0xffffffffffffffff_u64 as uint64_t;
+        return BAD;
     }
     x
 }
@@ -224,14 +222,14 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
     // u = u0 + u1*x + x^2
     // f%u = f0 + f1*x
     let mut v1;
-    let mut f2: [uint64_t; 6] = [0; 6];
+    let mut f2: [u64; 6] = [0; 6];
     let mut i: i32 = 0_i32;
     while i < 6_i32 {
         f2[i as usize] = f[i as usize];
         i += 1;
     }
-    let u0: uint64_t = (*d).u[0_i32 as usize] as uint64_t;
-    let u1: uint64_t = (*d).u[1_i32 as usize] as uint64_t;
+    let u0: u64 = (*d).u[0_i32 as usize] as u64;
+    let u1: u64 = (*d).u[1_i32 as usize] as u64;
     let mut j: i32 = 4_i32;
     loop {
         let fresh0 = j;
@@ -244,7 +242,7 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
             f2[(j + 1_i32) as usize],
             residue_mul(u1, f2[(j + 2_i32) as usize]),
         );
-        f2[(j + 2_i32) as usize] = 0_i32 as uint64_t;
+        f2[(j + 2_i32) as usize] = 0_i32 as u64;
     }
     // v = v0 + v1*x
     // u | (v0^2 - f0) + (2*v0*v1 - f1)*x + v1^2*x^2 = u0*v1^2 + u1*v1^2*x + v1^2*x^2
@@ -253,11 +251,11 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
     // v0^2 = f0 + u0*v1^2 = (f1 + u1*v1^2)^2 / (2*v1)^2
     // (f1^2) + 2*(f1*u1-2*f0) * v1^2 + (u1^2-4*u0) * v1^4 = 0
     // v1^2 = ((2*f0-f1*u1) +- 2*sqrt(-f0*f1*u1 + f0^2 + f1^2*u0))) / (u1^2-4*u0)
-    let f0: uint64_t = f2[0_i32 as usize];
-    let f1: uint64_t = f2[1_i32 as usize];
-    let u0double: uint64_t = residue_add(u0, u0);
-    let coeff2: uint64_t = residue_sub(residue_mul(u1, u1), residue_add(u0double, u0double));
-    let coeff1: uint64_t = residue_sub(residue_add(f0, f0), residue_mul(f1, u1));
+    let f0: u64 = f2[0_i32 as usize];
+    let f1: u64 = f2[1_i32 as usize];
+    let u0double: u64 = residue_add(u0, u0);
+    let coeff2: u64 = residue_sub(residue_mul(u1, u1), residue_add(u0double, u0double));
+    let coeff1: u64 = residue_sub(residue_add(f0, f0), residue_mul(f1, u1));
     if coeff2 == 0_i32 as u64 {
         if coeff1 == 0_i32 as u64 {
             if f1 == 0_i32 as u64 {
@@ -266,59 +264,59 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
             }
             return 0_i32;
         }
-        let sqr: uint64_t = residue_mul(
+        let sqr: u64 = residue_mul(
             residue_mul(f1, f1),
             residue_inv(residue_add(coeff1, coeff1)),
         );
         v1 = residue_sqrt(sqr);
-        if v1 == 0xffffffffffffffff_u64 {
+        if v1 == BAD {
             return 0_i32;
         }
     } else {
-        let mut d_0: uint64_t = residue_add(
+        let mut d_0: u64 = residue_add(
             residue_mul(f0, f0),
             residue_mul(f1, residue_sub(residue_mul(f1, u0), residue_mul(f0, u1))),
         );
         d_0 = residue_sqrt(d_0);
-        if d_0 == 0xffffffffffffffff_u64 {
+        if d_0 == BAD {
            return 0_i32;
        }
        d_0 = residue_add(d_0, d_0);
-        let inv: uint64_t = residue_inv(coeff2);
-        let mut root: uint64_t = residue_mul(residue_add(coeff1, d_0), inv);
+        let inv: u64 = residue_inv(coeff2);
+        let mut root: u64 = residue_mul(residue_add(coeff1, d_0), inv);
        v1 = residue_sqrt(root);
-        if v1 == 0xffffffffffffffff_u64 {
+        if v1 == BAD {
            root = residue_mul(residue_sub(coeff1, d_0), inv);
            v1 = residue_sqrt(root);
-            if v1 == 0xffffffffffffffff_u64 {
+            if v1 == BAD {
                return 0_i32;
            }
        }
    }
-    let v0: uint64_t = residue_mul(
+    let v0: u64 = residue_mul(
         residue_add(f1, residue_mul(u1, residue_mul(v1, v1))),
         residue_inv(residue_add(v1, v1)),
     );
-    (*d).v[0_i32 as usize] = v0 as uint16_t;
-    (*d).v[1_i32 as usize] = v1 as uint16_t;
+    (*d).v[0_i32 as usize] = v0 as u16;
+    (*d).v[1_i32 as usize] = v1 as u16;
     1_i32
 }
 
 /// generic short slow code
 unsafe fn polynomial_mul(
     adeg: i32,
-    a: *const uint64_t,
+    a: *const u64,
     bdeg: i32,
-    b: *const uint64_t,
+    b: *const u64,
     mut resultprevdeg: i32,
-    result: *mut uint64_t,
+    result: *mut u64,
 ) -> i32 {
     if adeg < 0_i32 || bdeg < 0_i32 {
         return resultprevdeg;
     }
     let mut i = resultprevdeg + 1_i32;
     while i <= adeg + bdeg {
-        *result.offset(i as isize) = 0_i32 as uint64_t;
+        *result.offset(i as isize) = 0_i32 as u64;
         i += 1;
     }
     resultprevdeg = i - 1_i32;
@@ -342,14 +340,14 @@ unsafe fn polynomial_mul(
 
 unsafe fn polynomial_div_monic(
     adeg: i32,
-    a: *mut uint64_t,
+    a: *mut u64,
     bdeg: i32,
-    b: *const uint64_t,
-    quotient: *mut uint64_t,
+    b: *const u64,
+    quotient: *mut u64,
 ) -> i32 {
     let mut i = adeg - bdeg;
     while i >= 0_i32 {
-        let q: uint64_t = *a.offset((i + bdeg) as isize);
+        let q: u64 = *a.offset((i + bdeg) as isize);
         if !quotient.is_null() {
             *quotient.offset(i as isize) = q;
         }
@@ -361,7 +359,7 @@ unsafe fn polynomial_div_monic(
             );
             j += 1;
         }
-        *a.offset((i + j) as isize) = 0_i32 as uint64_t;
+        *a.offset((i + j) as isize) = 0_i32 as u64;
         i -= 1;
     }
     i += bdeg;
@@ -373,30 +371,30 @@ unsafe fn polynomial_div_monic(
 
 unsafe fn polynomial_xgcd(
     adeg: i32,
-    a: *const uint64_t,
+    a: *const u64,
     bdeg: i32,
-    b: *const uint64_t,
+    b: *const u64,
     pgcddeg: *mut i32,
-    gcd: *mut uint64_t,
+    gcd: *mut u64,
     pmult1deg: *mut i32,
-    mult1: *mut uint64_t,
+    mult1: *mut u64,
     pmult2deg: *mut i32,
-    mult2: *mut uint64_t,
+    mult2: *mut u64,
 ) {
     let mut sdeg: i32 = -1_i32;
-    let mut s: [uint64_t; 3] = [0_i32 as uint64_t, 0_i32 as uint64_t, 0_i32 as uint64_t];
+    let mut s: [u64; 3] = [0_i32 as u64, 0_i32 as u64, 0_i32 as u64];
     let mut mult1deg: i32 = 0_i32;
-    *mult1.offset(0_i32 as isize) = 1_i32 as uint64_t;
-    *mult1.offset(1_i32 as isize) = 0_i32 as uint64_t;
-    *mult1.offset(2_i32 as isize) = 0_i32 as uint64_t;
+    *mult1.offset(0_i32 as isize) = 1_i32 as u64;
+    *mult1.offset(1_i32 as isize) = 0_i32 as u64;
+    *mult1.offset(2_i32 as isize) = 0_i32 as u64;
     let mut tdeg: i32 = 0_i32;
-    let mut t: [uint64_t; 3] = [1_i32 as uint64_t, 0_i32 as uint64_t, 0_i32 as uint64_t];
+    let mut t: [u64; 3] = [1_i32 as u64, 0_i32 as u64, 0_i32 as u64];
     let mut mult2deg: i32 = -1_i32;
-    *mult2.offset(0_i32 as isize) = 0_i32 as uint64_t;
-    *mult2.offset(1_i32 as isize) = 0_i32 as uint64_t;
-    *mult2.offset(2_i32 as isize) = 0_i32 as uint64_t;
+    *mult2.offset(0_i32 as isize) = 0_i32 as u64;
+    *mult2.offset(1_i32 as isize) = 0_i32 as u64;
+    *mult2.offset(2_i32 as isize) = 0_i32 as u64;
     let mut rdeg: i32 = bdeg;
-    let mut r: [uint64_t; 3] = [
+    let mut r: [u64; 3] = [
         *b.offset(0_i32 as isize),
         *b.offset(1_i32 as isize),
         *b.offset(2_i32 as isize),
@@ -443,7 +441,7 @@ unsafe fn polynomial_xgcd(
             *mult2.offset(2_i32 as isize) = tmp2;
         } else {
             let delta: i32 = gcddeg - rdeg;
-            let mult: uint64_t =
+            let mult: u64 =
                 residue_mul(*gcd.offset(gcddeg as isize), residue_inv(r[rdeg as usize]));
             // quotient = mult * x**delta
             let mut i: i32 = 0_i32;
@@ -493,47 +491,47 @@ unsafe fn polynomial_xgcd(
     *pmult2deg = mult2deg;
 }
 
-unsafe fn u2poly(src: *const TDivisor, polyu: *mut uint64_t, polyv: *mut uint64_t) -> i32 {
-    if (*src).u[1_i32 as usize] as u64 != 0xffffffffffffffff_u64 {
-        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as uint64_t;
-        *polyu.offset(1_i32 as isize) = (*src).u[1_i32 as usize] as uint64_t;
-        *polyu.offset(2_i32 as isize) = 1_i32 as uint64_t;
-        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as uint64_t;
-        *polyv.offset(1_i32 as isize) = (*src).v[1_i32 as usize] as uint64_t;
+unsafe fn u2poly(src: *const TDivisor, polyu: *mut u64, polyv: *mut u64) -> i32 {
+    if (*src).u[1_i32 as usize] as u64 != BAD {
+        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as u64;
+        *polyu.offset(1_i32 as isize) = (*src).u[1_i32 as usize] as u64;
+        *polyu.offset(2_i32 as isize) = 1_i32 as u64;
+        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as u64;
+        *polyv.offset(1_i32 as isize) = (*src).v[1_i32 as usize] as u64;
         return 2_i32;
     }
-    if (*src).u[0_i32 as usize] as u64 != 0xffffffffffffffff_u64 {
-        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as uint64_t;
-        *polyu.offset(1_i32 as isize) = 1_i32 as uint64_t;
-        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as uint64_t;
-        *polyv.offset(1_i32 as isize) = 0_i32 as uint64_t;
+    if (*src).u[0_i32 as usize] as u64 != BAD {
+        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as u64;
+        *polyu.offset(1_i32 as isize) = 1_i32 as u64;
+        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as u64;
+        *polyv.offset(1_i32 as isize) = 0_i32 as u64;
         return 1_i32;
     }
-    *polyu.offset(0_i32 as isize) = 1_i32 as uint64_t;
-    *polyv.offset(0_i32 as isize) = 0_i32 as uint64_t;
-    *polyv.offset(1_i32 as isize) = 0_i32 as uint64_t;
+    *polyu.offset(0_i32 as isize) = 1_i32 as u64;
+    *polyv.offset(0_i32 as isize) = 0_i32 as u64;
+    *polyv.offset(1_i32 as isize) = 0_i32 as u64;
     0_i32
 }
 
 unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mut TDivisor) {
-    let mut u1: [uint64_t; 3] = [0; 3];
-    let mut u2: [uint64_t; 3] = [0; 3];
-    let mut v1: [uint64_t; 2] = [0; 2];
-    let mut v2: [uint64_t; 2] = [0; 2];
+    let mut u1: [u64; 3] = [0; 3];
+    let mut u2: [u64; 3] = [0; 3];
+    let mut v1: [u64; 2] = [0; 2];
+    let mut v2: [u64; 2] = [0; 2];
     let u1deg: i32 = u2poly(src1, u1.as_mut_ptr(), v1.as_mut_ptr());
     let u2deg: i32 = u2poly(src2, u2.as_mut_ptr(), v2.as_mut_ptr());
     // extended gcd: d1 = gcd(u1, u2) = e1*u1 + e2*u2
     let mut d1deg: i32 = 0;
     let mut e1deg: i32 = 0;
     let mut e2deg: i32 = 0;
-    let mut d1: [uint64_t; 3] = [0; 3];
-    let mut e1: [uint64_t; 3] = [0; 3];
-    let mut e2: [uint64_t; 3] = [0; 3];
+    let mut d1: [u64; 3] = [0; 3];
+    let mut e1: [u64; 3] = [0; 3];
+    let mut e2: [u64; 3] = [0; 3];
     polynomial_xgcd(
         u1deg,
-        u1.as_mut_ptr() as *const uint64_t,
+        u1.as_mut_ptr() as *const u64,
         u2deg,
-        u2.as_mut_ptr() as *const uint64_t,
+        u2.as_mut_ptr() as *const u64,
         &mut d1deg,
         d1.as_mut_ptr(),
         &mut e1deg,
@@ -542,10 +540,10 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         e2.as_mut_ptr(),
     );
     // extended gcd again: d = gcd(d1, v1+v2) = c1*d1 + c2*(v1+v2)
-    let mut b: [uint64_t; 3] = [
+    let mut b: [u64; 3] = [
         residue_add(v1[0_i32 as usize], v2[0_i32 as usize]),
         residue_add(v1[1_i32 as usize], v2[1_i32 as usize]),
-        0_i32 as uint64_t,
+        0_i32 as u64,
     ];
     let bdeg: i32 = if b[1_i32 as usize] == 0_i32 as u64 {
         if b[0_i32 as usize] == 0_i32 as u64 {
@@ -559,14 +557,14 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
     let mut ddeg: i32 = 0;
     let mut c1deg: i32 = 0;
     let mut c2deg: i32 = 0;
-    let mut d: [uint64_t; 3] = [0; 3];
-    let mut c1: [uint64_t; 3] = [0; 3];
-    let mut c2: [uint64_t; 3] = [0; 3];
+    let mut d: [u64; 3] = [0; 3];
+    let mut c1: [u64; 3] = [0; 3];
+    let mut c2: [u64; 3] = [0; 3];
     polynomial_xgcd(
         d1deg,
-        d1.as_mut_ptr() as *const uint64_t,
+        d1.as_mut_ptr() as *const u64,
         bdeg,
-        b.as_mut_ptr() as *const uint64_t,
+        b.as_mut_ptr() as *const u64,
         &mut ddeg,
         d.as_mut_ptr(),
         &mut c1deg,
@@ -574,13 +572,13 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         &mut c2deg,
         c2.as_mut_ptr(),
     );
-    let dmult: uint64_t = residue_inv(d[ddeg as usize]);
+    let dmult: u64 = residue_inv(d[ddeg as usize]);
     let mut i = 0_i32;
     while i < ddeg {
         d[i as usize] = residue_mul(d[i as usize], dmult);
         i += 1;
     }
-    d[i as usize] = 1_i32 as uint64_t;
+    d[i as usize] = 1_i32 as u64;
     i = 0_i32;
     while i <= c1deg {
         c1[i as usize] = residue_mul(c1[i as usize], dmult);
         i += 1;
@@ -591,43 +589,43 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         c2[i as usize] = residue_mul(c2[i as usize], dmult);
         i += 1;
     }
-    let mut u: [uint64_t; 5] = [0; 5];
+    let mut u: [u64; 5] = [0; 5];
     let mut udeg: i32 = polynomial_mul(
         u1deg,
-        u1.as_mut_ptr() as *const uint64_t,
+        u1.as_mut_ptr() as *const u64,
         u2deg,
-        u2.as_mut_ptr() as *const uint64_t,
+        u2.as_mut_ptr() as *const u64,
         -1_i32,
         u.as_mut_ptr(),
     );
     // u is monic
-    let mut v: [uint64_t; 7] = [0; 7];
-    let mut tmp: [uint64_t; 7] = [0; 7];
+    let mut v: [u64; 7] = [0; 7];
+    let mut tmp: [u64; 7] = [0; 7];
     // c1*(e1*u1*v2 + e2*u2*v1) + c2*(v1*v2 + f)
     // c1*(e1*u1*(v2-v1) + d1*v1) + c2*(v1*v2 + f)
     v[0_i32 as usize] = residue_sub(v2[0_i32 as usize], v1[0_i32 as usize]);
     v[1_i32 as usize] = residue_sub(v2[1_i32 as usize], v1[1_i32 as usize]);
     let mut tmpdeg = polynomial_mul(
         e1deg,
-        e1.as_mut_ptr() as *const uint64_t,
+        e1.as_mut_ptr() as *const u64,
         1_i32,
-        v.as_mut_ptr() as *const uint64_t,
+        v.as_mut_ptr() as *const u64,
         -1_i32,
         tmp.as_mut_ptr(),
     );
     let mut vdeg = polynomial_mul(
         u1deg,
-        u1.as_mut_ptr() as *const uint64_t,
+        u1.as_mut_ptr() as *const u64,
         tmpdeg,
-        tmp.as_mut_ptr() as *const uint64_t,
+        tmp.as_mut_ptr() as *const u64,
         -1_i32,
         v.as_mut_ptr(),
     );
     vdeg = polynomial_mul(
         d1deg,
-        d1.as_mut_ptr() as *const uint64_t,
+        d1.as_mut_ptr() as *const u64,
         1_i32,
-        v1.as_mut_ptr() as *const uint64_t,
+        v1.as_mut_ptr() as *const u64,
         vdeg,
         v.as_mut_ptr(),
     );
@@ -645,27 +643,27 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
     tmpdeg = 5_i32;
     tmpdeg = polynomial_mul(
         1_i32,
-        v1.as_mut_ptr() as *const uint64_t,
+        v1.as_mut_ptr() as *const u64,
         1_i32,
-        v2.as_mut_ptr() as *const uint64_t,
+        v2.as_mut_ptr() as *const u64,
         tmpdeg,
         tmp.as_mut_ptr(),
     );
     vdeg = polynomial_mul(
         c2deg,
-        c2.as_mut_ptr() as *const uint64_t,
+        c2.as_mut_ptr() as *const u64,
         tmpdeg,
-        tmp.as_mut_ptr() as *const uint64_t,
+        tmp.as_mut_ptr() as *const u64,
         vdeg,
         v.as_mut_ptr(),
     );
     if ddeg > 0_i32 {
-        let mut udiv: [uint64_t; 5] = [0; 5];
+        let mut udiv: [u64; 5] = [0; 5];
         polynomial_div_monic(
             udeg,
             u.as_mut_ptr(),
             ddeg,
-            d.as_mut_ptr() as *const uint64_t,
+            d.as_mut_ptr() as *const u64,
             udiv.as_mut_ptr(),
         );
         udeg -= ddeg;
@@ -673,7 +671,7 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             udeg,
             udiv.as_mut_ptr(),
             ddeg,
-            d.as_mut_ptr() as *const uint64_t,
+            d.as_mut_ptr() as *const u64,
             u.as_mut_ptr(),
         );
         udeg -= ddeg;
@@ -682,7 +680,7 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             vdeg,
             v.as_mut_ptr(),
             ddeg,
-            d.as_mut_ptr() as *const uint64_t,
+            d.as_mut_ptr() as *const u64,
             udiv.as_mut_ptr(),
         );
         vdeg -= ddeg;
@@ -695,16 +693,16 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         vdeg,
         v.as_mut_ptr(),
         udeg,
-        u.as_mut_ptr() as *const uint64_t,
-        std::ptr::null_mut::<uint64_t>(),
+        u.as_mut_ptr() as *const u64,
+        std::ptr::null_mut::<u64>(),
     );
     while udeg > 2_i32 {
         // u' = monic((f-v^2)/u), v'=-v mod u'
         tmpdeg = polynomial_mul(
             vdeg,
-            v.as_mut_ptr() as *const uint64_t,
+            v.as_mut_ptr() as *const u64,
             vdeg,
-            v.as_mut_ptr() as *const uint64_t,
+            v.as_mut_ptr() as *const u64,
             -1_i32,
             tmp.as_mut_ptr(),
         );
@@ -714,7 +712,7 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             i += 1;
         }
         while i <= tmpdeg {
-            tmp[i as usize] = residue_sub(0_i32 as uint64_t, tmp[i as usize]);
+            tmp[i as usize] = residue_sub(0_i32 as u64, tmp[i as usize]);
             i += 1;
         }
         while i <= 5_i32 {
@@ -722,76 +720,76 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             i += 1;
         }
         tmpdeg = i - 1_i32;
-        let mut udiv_0: [uint64_t; 5] = [0; 5];
+        let mut udiv_0: [u64; 5] = [0; 5];
         polynomial_div_monic(
             tmpdeg,
             tmp.as_mut_ptr(),
             udeg,
-            u.as_mut_ptr() as *const uint64_t,
+            u.as_mut_ptr() as *const u64,
             udiv_0.as_mut_ptr(),
         );
         udeg = tmpdeg - udeg;
-        let mult: uint64_t = residue_inv(udiv_0[udeg as usize]);
+        let mult: u64 = residue_inv(udiv_0[udeg as usize]);
         i = 0_i32;
         while i < udeg {
             u[i as usize] = residue_mul(udiv_0[i as usize], mult);
             i += 1;
         }
-        u[i as usize] = 1_i32 as uint64_t;
+        u[i as usize] = 1_i32 as u64;
         i = 0_i32;
         while i <= vdeg {
-            v[i as usize] = residue_sub(0_i32 as uint64_t, v[i as usize]);
+            v[i as usize] = residue_sub(0_i32 as u64, v[i as usize]);
             i += 1;
         }
         vdeg = polynomial_div_monic(
             vdeg,
             v.as_mut_ptr(),
            udeg,
-            u.as_mut_ptr() as *const uint64_t,
-            std::ptr::null_mut::<uint64_t>(),
+            u.as_mut_ptr() as *const u64,
+            std::ptr::null_mut::<u64>(),
        );
    }
    if udeg == 2_i32 {
-        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as uint16_t;
-        (*dst).u[1_i32 as usize] = u[1_i32 as usize] as uint16_t;
+        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as u16;
+        (*dst).u[1_i32 as usize] = u[1_i32 as usize] as u16;
        (*dst).v[0_i32 as usize] = (if vdeg >= 0_i32 {
            v[0_i32 as usize]
        } else {
            0_i32 as u64
-        }) as uint16_t;
+        }) as u16;
        (*dst).v[1_i32 as usize] = (if vdeg >= 1_i32 {
            v[1_i32 as usize]
        } else {
            0_i32 as u64
-        }) as uint16_t;
+        }) as u16;
    } else if udeg == 1_i32 {
-        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as uint16_t;
-        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as u16;
+        (*dst).u[1_i32 as usize] = BAD as u16;
        (*dst).v[0_i32 as usize] = (if vdeg >= 0_i32 {
            v[0_i32 as usize]
        } else {
            0_i32 as u64
-        }) as uint16_t;
-        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        }) as u16;
+        (*dst).v[1_i32 as usize] = BAD as u16;
    } else {
-        (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        (*dst).u[0_i32 as usize] = BAD as u16;
+        (*dst).u[1_i32 as usize] = BAD as u16;
+        (*dst).v[0_i32 as usize] = BAD as u16;
+        (*dst).v[1_i32 as usize] = BAD as u16;
    };
 }
 
 unsafe fn divisor_mul128(
     src: *const TDivisor,
-    mut mult_lo: uint64_t,
-    mut mult_hi: uint64_t,
+    mut mult_lo: u64,
+    mut mult_hi: u64,
     mut dst: *mut TDivisor,
 ) {
     if mult_lo == 0_i32 as u64 && mult_hi == 0_i32 as u64 {
-        (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        (*dst).u[0_i32 as usize] = BAD as u16;
+        (*dst).u[1_i32 as usize] = BAD as u16;
+        (*dst).v[0_i32 as usize] = BAD as u16;
+        (*dst).v[1_i32 as usize] = BAD as u16;
         return;
     }
     let mut cur: TDivisor = *src;
@@ -799,7 +797,7 @@ unsafe fn divisor_mul128(
         divisor_add(&cur, &cur, &mut cur);
         mult_lo >>= 1_i32;
         if mult_hi & 1_i32 as u64 != 0 {
-            mult_lo = (mult_lo | 1_u64 << 63_i32) as uint64_t;
+            mult_lo |= 1_u64 << 63_i32;
         }
         mult_hi >>= 1_i32;
     }
@@ -807,7 +805,7 @@ unsafe fn divisor_mul128(
     loop {
         mult_lo >>= 1_i32;
         if mult_hi & 1_i32 as u64 != 0 {
-            mult_lo = (mult_lo | 1_u64 << 63_i32) as uint64_t;
+            mult_lo |= 1_u64 << 63_i32;
         }
         mult_hi >>= 1_i32;
         if mult_lo == 0_i32 as u64 && mult_hi == 0_i32 as u64 {
@@ -1150,9 +1148,9 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
         keybuf.as_mut_ptr() as *mut c_void,
         8,
     );
-    let mut productIdMixed: uint64_t = (productId1 as uint64_t) << 41_i32
-        | (productId2 as uint64_t) << 58_i32
-        | (productId3 as uint64_t) << 17_i32
+    let mut productIdMixed: u64 = (productId1 as u64) << 41_i32
+        | (productId2 as u64) << 58_i32
+        | (productId3 as u64) << 17_i32
         | productId4 as u64;
     ptr::copy_nonoverlapping(
         &mut productIdMixed as *mut u64 as *const c_void,
@@ -1166,8 +1164,8 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
     let mut attempt = 0_i32 as u8;
     while attempt as i32 <= 0x80_i32 {
         let mut u: C2RustUnnamed_3 = C2RustUnnamed_3 { buffer: [0; 14] };
-        u.c2rust_unnamed.lo = 0_i32 as uint64_t;
-        u.c2rust_unnamed.hi = 0_i32 as uint64_t;
+        u.c2rust_unnamed.lo = 0_i32 as u64;
+        u.c2rust_unnamed.hi = 0_i32 as u64;
         u.buffer[7_i32 as usize] = attempt;
         Mix(
             (u.buffer).as_mut_ptr(),
@@ -1175,17 +1173,14 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
             keybuf.as_mut_ptr(),
             16_i32 as size_t,
         );
-        let mut x2: uint64_t = ui128_quotient_mod(u.c2rust_unnamed.lo, u.c2rust_unnamed.hi);
-        let x1: uint64_t =
-            u.c2rust_unnamed
-                .lo
-                .wrapping_sub(x2.wrapping_mul(0x16a6b036d7f2a79_u64)) as uint64_t;
+        let mut x2: u64 = ui128_quotient_mod(u.c2rust_unnamed.lo, u.c2rust_unnamed.hi);
+        let x1: u64 = u.c2rust_unnamed.lo.wrapping_sub(x2.wrapping_mul(MOD));
         x2 = x2.wrapping_add(1);
         d_0.u[0_i32 as usize] = residue_sub(
             residue_mul(x1, x1),
-            residue_mul(43_i32 as uint64_t, residue_mul(x2, x2)),
-        ) as uint16_t;
-        d_0.u[1_i32 as usize] = residue_add(x1, x1) as uint16_t;
+            residue_mul(43_i32 as u64, residue_mul(x2, x2)),
+        ) as u16;
+        d_0.u[1_i32 as usize] = residue_add(x1, x1) as u16;
         if find_divisor_v(&mut d_0) != 0 {
             break;
         }
@@ -1196,8 +1191,8 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
     }
     divisor_mul128(
         &d_0,
-        0x4e21b9d10f127c1_i64 as uint64_t,
-        0x40da7c36d44c_i64 as uint64_t,
+        0x4e21b9d10f127c1_i64 as u64,
+        0x40da7c36d44c_i64 as u64,
         &mut d_0,
     );
     let mut e: C2RustUnnamed_0 = C2RustUnnamed_0 {
@@ -1206,63 +1201,56 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
             encoded_hi: 0,
         },
     };
-    if d_0.u[0_i32 as usize] as u64 == 0xffffffffffffffff_u64 {
+    if d_0.u[0_i32 as usize] as u64 == BAD {
         // we can not get the zero divisor, actually...
         e.c2rust_unnamed.encoded_lo = umul128(
-            0x16a6b036d7f2a79_u64.wrapping_add(2_i32 as u64) as uint64_t,
-            0x16a6b036d7f2a79_u64 as uint64_t,
+            MOD.wrapping_add(2_i32 as u64),
+            MOD,
             &mut e.c2rust_unnamed.encoded_hi,
         );
-    } else if d_0.u[1_i32 as usize] as u64 == 0xffffffffffffffff_u64 {
+    } else if d_0.u[1_i32 as usize] as u64 == BAD {
         e.c2rust_unnamed.encoded_lo = umul128(
-            0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64) as uint64_t,
-            d_0.u[0_i32 as usize] as uint64_t,
+            MOD.wrapping_add(1_i32 as u64),
+            d_0.u[0_i32 as usize] as u64,
             &mut e.c2rust_unnamed.encoded_hi,
         );
-        e.c2rust_unnamed.encoded_lo =
-            e.c2rust_unnamed
-                .encoded_lo
-                .wrapping_add(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
+        e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(MOD);
         e.c2rust_unnamed.encoded_hi = e
             .c2rust_unnamed
             .encoded_hi
-            .wrapping_add((e.c2rust_unnamed.encoded_lo < 0x16a6b036d7f2a79_u64) as i32 as u64)
-            as uint64_t as uint64_t;
+            .wrapping_add((e.c2rust_unnamed.encoded_lo < MOD) as i32 as u64);
     } else {
-        let x1_0: uint64_t = (if d_0.u[1_i32 as usize] as i32 % 2_i32 != 0 {
-            (d_0.u[1_i32 as usize] as u64).wrapping_add(0x16a6b036d7f2a79_u64)
+        let x1_0: u64 = (if d_0.u[1_i32 as usize] as i32 % 2_i32 != 0 {
+            (d_0.u[1_i32 as usize] as u64).wrapping_add(MOD)
         } else {
             d_0.u[1_i32 as usize] as u64
         })
-        .wrapping_div(2_i32 as u64) as uint64_t;
-        let x2sqr: uint64_t =
-            residue_sub(residue_mul(x1_0, x1_0), d_0.u[0_i32 as usize] as uint64_t);
-        let mut x2_0: uint64_t = residue_sqrt(x2sqr);
-        if x2_0 == 0xffffffffffffffff_u64 {
-            x2_0 = residue_sqrt(residue_mul(x2sqr, residue_inv(43_i32 as uint64_t)));
+        .wrapping_div(2_i32 as u64);
+        let x2sqr: u64 = residue_sub(residue_mul(x1_0, x1_0), d_0.u[0_i32 as usize] as u64);
+        let mut x2_0: u64 = residue_sqrt(x2sqr);
+        if x2_0 == BAD {
+            x2_0 = residue_sqrt(residue_mul(x2sqr, residue_inv(43_i32 as u64)));
            e.c2rust_unnamed.encoded_lo = umul128(
-                0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64) as uint64_t,
-                0x16a6b036d7f2a79_u64.wrapping_add(x2_0) as uint64_t,
+                MOD.wrapping_add(1_i32 as u64),
+                MOD.wrapping_add(x2_0),
                &mut e.c2rust_unnamed.encoded_hi,
            );
-            e.c2rust_unnamed.encoded_lo =
-                e.c2rust_unnamed.encoded_lo.wrapping_add(x1_0) as uint64_t as uint64_t;
+            e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(x1_0);
            e.c2rust_unnamed.encoded_hi = e
                .c2rust_unnamed
                .encoded_hi
-                .wrapping_add((e.c2rust_unnamed.encoded_lo < x1_0) as i32 as u64)
-                as uint64_t as uint64_t;
+                .wrapping_add((e.c2rust_unnamed.encoded_lo < x1_0) as i32 as u64);
        } else {
            // points (-x1+x2, v(-x1+x2)) and (-x1-x2, v(-x1-x2))
-            let mut x1a: uint64_t = residue_sub(x1_0, x2_0);
-            let y1: uint64_t = residue_sub(
-                d_0.v[0_i32 as usize] as uint64_t,
-                residue_mul(d_0.v[1_i32 as usize] as uint64_t, x1a),
+            let mut x1a: u64 = residue_sub(x1_0, x2_0);
+            let y1: u64 = residue_sub(
+                d_0.v[0_i32 as usize] as u64,
+                residue_mul(d_0.v[1_i32 as usize] as u64, x1a),
            );
-            let mut x2a: uint64_t = residue_add(x1_0, x2_0);
-            let y2: uint64_t = residue_sub(
-                d_0.v[0_i32 as usize] as uint64_t,
-                residue_mul(d_0.v[1_i32 as usize] as uint64_t, x2a),
+            let mut x2a: u64 = residue_add(x1_0, x2_0);
+            let y2: u64 = residue_sub(
+                d_0.v[0_i32 as usize] as u64,
+                residue_mul(d_0.v[1_i32 as usize] as u64, x2a),
            );
            if x1a > x2a {
                std::mem::swap(&mut x1a, &mut x2a);
@@ -1271,44 +1259,38 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
                std::mem::swap(&mut x1a, &mut x2a);
            }
            e.c2rust_unnamed.encoded_lo = umul128(
-                0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64) as uint64_t,
+                MOD.wrapping_add(1_i32 as u64),
                x1a,
                &mut e.c2rust_unnamed.encoded_hi,
            );
-            e.c2rust_unnamed.encoded_lo =
-                e.c2rust_unnamed.encoded_lo.wrapping_add(x2a) as uint64_t as uint64_t;
+            e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(x2a);
            e.c2rust_unnamed.encoded_hi = e
                .c2rust_unnamed
                .encoded_hi
-                .wrapping_add((e.c2rust_unnamed.encoded_lo < x2a) as i32 as u64)
-                as uint64_t as uint64_t;
+                .wrapping_add((e.c2rust_unnamed.encoded_lo < x2a) as i32 as u64);
        }
    }
    let mut decimal: [u8; 35] = [0; 35];
    let mut i = 0_i32 as size_t;
    while i < 35_i32 as u64 {
        let c: u32 = (e.c2rust_unnamed_0.encoded[3_i32 as usize]).wrapping_rem(10_i32 as u32);
-        e.c2rust_unnamed_0.encoded[3_i32 as usize] = e.c2rust_unnamed_0.encoded[3_i32 as usize]
-            .wrapping_div(10_i32 as u32)
-            as uint32_t as uint32_t;
-        let c2: u32 = ((c as uint64_t) << 32_i32
-            | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
+        e.c2rust_unnamed_0.encoded[3_i32 as usize] =
+            e.c2rust_unnamed_0.encoded[3_i32 as usize].wrapping_div(10_i32 as u32);
+        let c2: u32 = ((c as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
        e.c2rust_unnamed_0.encoded[2_i32 as usize] =
-            ((c as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
-                .wrapping_div(10_i32 as u64) as uint32_t;
-        let c3: u32 = ((c2 as uint64_t) << 32_i32
-            | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
+            ((c as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
+                .wrapping_div(10_i32 as u64) as u32;
+        let c3: u32 = ((c2 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
        e.c2rust_unnamed_0.encoded[1_i32 as usize] =
-            ((c2 as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
-                .wrapping_div(10_i32 as u64) as uint32_t;
-        let c4: u32 = ((c3 as uint64_t) << 32_i32
-            | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
+            ((c2 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
+                .wrapping_div(10_i32 as u64) as u32;
+        let c4: u32 = ((c3 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
        e.c2rust_unnamed_0.encoded[0_i32 as usize] =
-            ((c3 as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
-                .wrapping_div(10_i32 as u64) as uint32_t;
+            ((c3 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
+                .wrapping_div(10_i32 as u64) as u32;
        decimal[(34_i32 as u64).wrapping_sub(i) as usize] = c4 as u8;
        i = i.wrapping_add(1);
    }
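
For reference, the two constants this patch introduces carry the whole arithmetic contract of the residue_* helpers: everything is computed in the prime field GF(MOD), and BAD works as an out-of-band error marker simply because an all-ones u64 can never be a reduced residue (BAD >= MOD). The following standalone sketch (not part of the commit; names merely mirror the patch, and it assumes MOD is prime, which is what residue_inv and residue_sqrt rely on) cross-checks those semantics against plain u128 reference arithmetic:

    // Reference check for the field arithmetic behind confid.rs.
    // Assumes MOD is prime, as the divisor math in the patch requires.
    const MOD: u64 = 0x16A6B036D7F2A79;

    // Reference multiplication via u128, mirroring what residue_mul()
    // computes with umul128 + the ui128_quotient_mod reduction.
    fn mul(x: u64, y: u64) -> u64 {
        ((x as u128 * y as u128) % MOD as u128) as u64
    }

    // Square-and-multiply ladder, the same scheme residue_pow() uses.
    fn pow(mut x: u64, mut y: u64) -> u64 {
        let mut r = 1u64;
        while y > 0 {
            if y & 1 == 1 {
                r = mul(r, x);
            }
            x = mul(x, x);
            y >>= 1;
        }
        r
    }

    fn main() {
        let x = 0x123456789abcd % MOD;
        // Fermat inverse: x^(MOD-2) mod MOD. inverse()/residue_inv()
        // reach the same value via the extended Euclidean algorithm.
        let inv = pow(x, MOD - 2);
        assert_eq!(mul(x, inv), 1);
        // Euler's criterion: x^((MOD-1)/2) == 1 iff x is a square,
        // the precondition under which residue_sqrt() can succeed.
        let legendre = pow(x, (MOD - 1) / 2);
        println!("x * x^-1 == 1 ok; x is a quadratic residue: {}", legendre == 1);
    }

The sentinel choice also explains the repeated `BAD as u16` stores: TDivisor holds u16 coordinates, so the all-ones pattern truncates to 0xffff, which widens back to a value that still compares unequal to any valid reduced coordinate the same way the original C's 0xffffffffffffffff casts did.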