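// Confirmation ID generator (a c2rust translation of the well-known C
// implementation): the installation ID is decrypted with a SHA-1-based
// Feistel cipher, mapped to a divisor on the Jacobian of a genus-2
// hyperelliptic curve over GF(p) with p = 0x16A6B036D7F2A79, multiplied by a
// fixed private scalar, and re-encoded as the decimal confirmation ID.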
#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)]

use std::ffi::{CStr, CString};

use thiserror::Error;

extern "C" {
    fn memcpy(_: *mut libc::c_void, _: *const libc::c_void, _: u64) -> *mut libc::c_void;
    fn memset(_: *mut libc::c_void, _: i32, _: u64) -> *mut libc::c_void;
}

type size_t = u64;
type int64_t = i64;
type uint16_t = u16;
type uint32_t = u32;
type uint64_t = u64;

// Divisor in Mumford representation (u, v): u holds the coefficients of the
// monic u-polynomial (low first), v the v-polynomial. Coordinates are
// residues modulo the 57-bit prime p = 0x16A6B036D7F2A79 and need full
// 64-bit storage; the all-ones value marks a missing coordinate.
#[derive(Copy, Clone)]
#[repr(C)]
struct TDivisor {
    u: [uint64_t; 2],
    v: [uint64_t; 2],
}

#[derive(Copy, Clone)]
#[repr(C)]
struct C2RustUnnamed {
    encoded: [uint32_t; 4],
}

// The encoded confirmation ID, viewable either as two 64-bit halves or as
// four 32-bit limbs for the decimal conversion.
#[derive(Copy, Clone)]
#[repr(C)]
union C2RustUnnamed_0 {
    c2rust_unnamed: C2RustUnnamed_1,
    c2rust_unnamed_0: C2RustUnnamed,
}

#[derive(Copy, Clone)]
#[repr(C)]
struct C2RustUnnamed_1 {
    encoded_lo: uint64_t,
    encoded_hi: uint64_t,
}

#[derive(Copy, Clone)]
#[repr(C)]
struct C2RustUnnamed_2 {
    lo: uint64_t,
    hi: uint64_t,
}

#[derive(Copy, Clone)]
#[repr(C)]
union C2RustUnnamed_3 {
    buffer: [libc::c_uchar; 14],
    c2rust_unnamed: C2RustUnnamed_2,
}

// Decrypted installation ID layout (little-endian, bit-packed).
#[derive(Copy, Clone)]
#[repr(C, packed)]
struct C2RustUnnamed_4 {
    HardwareID: uint64_t,
    ProductIDLow: uint64_t,
    ProductIDHigh: libc::c_uchar,
    KeySHA1: libc::c_ushort,
}

// Coefficients, low-order first, of the curve polynomial
// f(x) = x^5 + f[4]*x^4 + f[3]*x^3 + f[2]*x^2 + f[1]*x over GF(p).
static f: [uint64_t; 6] = [
    0_i32 as uint64_t,
    0x21840136c85381_u64 as uint64_t,
    0x44197b83892ad0_u64 as uint64_t,
    0x1400606322b3b04_u64 as uint64_t,
    0x1400606322b3b04_u64 as uint64_t,
    1_i32 as uint64_t,
];

// Addition modulo the field prime p = 0x16A6B036D7F2A79.
#[no_mangle]
unsafe extern "C" fn residue_add(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
    let mut z: uint64_t = x.wrapping_add(y);
    if z >= 0x16a6b036d7f2a79_u64 {
        z = z.wrapping_sub(0x16a6b036d7f2a79_u64);
    }
    z
}

// Subtraction modulo p.
#[no_mangle]
unsafe extern "C" fn residue_sub(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
    let mut z: uint64_t = x.wrapping_sub(y);
    if x < y {
        z = z.wrapping_add(0x16a6b036d7f2a79_u64);
    }
    z
}

// Full 64x64 -> 128-bit multiply: returns the low 64 bits and stores the
// high 64 bits through product_hi.
#[no_mangle]
unsafe extern "C" fn __umul128(
    mut multiplier: uint64_t,
    mut multiplicand: uint64_t,
    mut product_hi: *mut uint64_t,
) -> uint64_t {
    let mut a: uint64_t = multiplier >> 32_i32;
    let mut b: uint64_t = multiplier & 0xffffffff_u32 as u64;
    let mut c: uint64_t = multiplicand >> 32_i32;
    let mut d: uint64_t = multiplicand & 0xffffffff_u32 as u64;
    let mut ad: uint64_t = a.wrapping_mul(d);
    let mut bd: uint64_t = b.wrapping_mul(d);
    let mut adbc: uint64_t = ad.wrapping_add(b.wrapping_mul(c));
    let mut adbc_carry: uint64_t = (if adbc < ad { 1_i32 } else { 0_i32 }) as uint64_t;
    let mut product_lo: uint64_t = bd.wrapping_add(adbc << 32_i32);
    let mut product_lo_carry: uint64_t = (if product_lo < bd { 1_i32 } else { 0_i32 }) as uint64_t;
    *product_hi = a
        .wrapping_mul(c)
        .wrapping_add(adbc >> 32_i32)
        .wrapping_add(adbc_carry << 32_i32)
        .wrapping_add(product_lo_carry);
    product_lo
}

// floor((hi:lo) / p), computed by multiplying by the precomputed reciprocal
// 0x2D351C6D04F8B:0x604FA6A1C6346A87 and shifting the 256-bit product right
// by 106 bits.
#[no_mangle]
unsafe extern "C" fn ui128_quotient_mod(mut lo: uint64_t, mut hi: uint64_t) -> uint64_t {
    let mut prod1: uint64_t = 0;
    __umul128(lo, 0x604fa6a1c6346a87_i64 as uint64_t, &mut prod1);
    let mut part1hi: uint64_t = 0;
    let mut part1lo: uint64_t = __umul128(lo, 0x2d351c6d04f8b_i64 as uint64_t, &mut part1hi);
    let mut part2hi: uint64_t = 0;
    let mut part2lo: uint64_t = __umul128(hi, 0x604fa6a1c6346a87_i64 as uint64_t, &mut part2hi);
    let mut sum1: uint64_t = part1lo.wrapping_add(part2lo);
    let mut sum1carry: u32 = (sum1 < part1lo) as i32 as u32;
    sum1 = sum1.wrapping_add(prod1);
    sum1carry = sum1carry.wrapping_add((sum1 < prod1) as i32 as u32);
    let mut prod2: uint64_t = part1hi.wrapping_add(part2hi).wrapping_add(sum1carry as u64);
    let mut prod3hi: uint64_t = 0;
    let mut prod3lo: uint64_t = __umul128(hi, 0x2d351c6d04f8b_i64 as uint64_t, &mut prod3hi);
    prod3lo = prod3lo.wrapping_add(prod2);
    prod3hi = prod3hi.wrapping_add((prod3lo < prod2) as i32 as u64);
    prod3lo >> 42_i32 | prod3hi << 22_i32
}

// Multiplication modulo p, via the 128-bit product and the exact quotient
// from ui128_quotient_mod.
#[no_mangle]
unsafe extern "C" fn residue_mul(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
    let mut hi: uint64_t = 0;
    let mut lo: uint64_t = __umul128(x, y, &mut hi);
    let mut quotient: uint64_t = ui128_quotient_mod(lo, hi);
    lo.wrapping_sub(quotient.wrapping_mul(0x16a6b036d7f2a79_u64))
}

// Modular exponentiation by square-and-multiply.
#[no_mangle]
unsafe extern "C" fn residue_pow(mut x: uint64_t, mut y: uint64_t) -> uint64_t {
    if y == 0_i32 as u64 {
        return 1_i32 as uint64_t;
    }
    let mut cur: uint64_t = x;
    while y & 1_i32 as u64 == 0 {
        cur = residue_mul(cur, cur);
        y >>= 1_i32;
    }
    let mut res: uint64_t = cur;
    loop {
        y >>= 1_i32;
        if y == 0_i32 as u64 {
            break;
        }
        cur = residue_mul(cur, cur);
        if y & 1_i32 as u64 != 0 {
            res = residue_mul(res, cur);
        }
    }
    res
}

// Modular inverse of u mod v via the extended Euclidean algorithm.
#[no_mangle]
unsafe extern "C" fn inverse(mut u: uint64_t, mut v: uint64_t) -> uint64_t {
    let mut tmp: int64_t = 0;
    let mut xu: int64_t = 1_i32 as int64_t;
    let mut xv: int64_t = 0_i32 as int64_t;
    let mut v0: uint64_t = v;
    while u > 1_i32 as u64 {
        let mut d: uint64_t = v.wrapping_div(u);
        let mut remainder: uint64_t = v.wrapping_rem(u);
        tmp = u as int64_t;
        u = remainder;
        v = tmp as uint64_t;
        tmp = xu;
        xu = (xv as u64).wrapping_sub(d.wrapping_mul(xu as u64)) as int64_t;
        xv = tmp;
    }
    xu = (xu as u64).wrapping_add(if xu < 0_i32 as i64 { v0 } else { 0_i32 as u64 }) as int64_t;
    xu as uint64_t
}

// Inverse modulo the field prime p.
#[no_mangle]
unsafe extern "C" fn residue_inv(mut x: uint64_t) -> uint64_t {
    inverse(x, 0x16a6b036d7f2a79_u64 as uint64_t)
}
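
// Sanity sketch (an addition, not part of the original translation): in the
// prime field GF(p) every nonzero x must satisfy x * x^{-1} == 1.
#[cfg(test)]
mod residue_tests {
    #[test]
    fn inverse_roundtrip() {
        unsafe {
            let x: u64 = 1234567890123_u64; // arbitrary nonzero residue < p
            assert_eq!(super::residue_mul(x, super::residue_inv(x)), 1);
        }
    }
}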

// Square root modulo p by the Tonelli-Shanks algorithm, with g = 43 as the
// quadratic non-residue. Returns all-ones if `what` has no square root.
#[no_mangle]
unsafe extern "C" fn residue_sqrt(mut what: uint64_t) -> uint64_t {
    if what == 0 {
        return 0_i32 as uint64_t;
    }
    let mut g: uint64_t = 43_i32 as uint64_t;
    let mut z: uint64_t = 0;
    let mut y: uint64_t = 0;
    let mut r: uint64_t = 0;
    let mut x: uint64_t = 0;
    let mut b: uint64_t = 0;
    let mut t: uint64_t = 0;
    // Write p - 1 = q * 2^e with q odd.
    let mut e: uint64_t = 0_i32 as uint64_t;
    let mut q: uint64_t = 0x16a6b036d7f2a79_u64.wrapping_sub(1_i32 as u64);
    while q & 1_i32 as u64 == 0 {
        e = e.wrapping_add(1);
        q >>= 1_i32;
    }
    z = residue_pow(g, q);
    y = z;
    r = e;
    x = residue_pow(what, q.wrapping_sub(1_i32 as u64).wrapping_div(2_i32 as u64));
    b = residue_mul(residue_mul(what, x), x);
    x = residue_mul(what, x);
    while b != 1_i32 as u64 {
        // Find the least m with b^(2^m) == 1.
        let mut m: uint64_t = 0_i32 as uint64_t;
        let mut b2: uint64_t = b;
        loop {
            m = m.wrapping_add(1);
            b2 = residue_mul(b2, b2);
            if b2 == 1_i32 as u64 {
                break;
            }
        }
        if m == r {
            return 0xffffffffffffffff_u64;
        }
        t = residue_pow(y, (1_i32 << r.wrapping_sub(m).wrapping_sub(1_i32 as u64)) as uint64_t);
        y = residue_mul(t, t);
        r = m;
        x = residue_mul(x, t);
        b = residue_mul(b, y);
    }
    if residue_mul(x, x) != what {
        return 0xffffffffffffffff_u64;
    }
    x
}

// Given the u-polynomial of a divisor, solve v(x)^2 = f(x) (mod u(x)) for a
// matching v-polynomial, completing (u, v) into a point on the Jacobian.
// Returns 1 on success, 0 if no such v exists.
#[no_mangle]
unsafe extern "C" fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
    let mut v1: uint64_t = 0;
    let mut f2: [uint64_t; 6] = [0; 6];
    let mut i: i32 = 0_i32;
    while i < 6_i32 {
        f2[i as usize] = f[i as usize];
        i += 1;
    }
    let u0: uint64_t = (*d).u[0_i32 as usize];
    let u1: uint64_t = (*d).u[1_i32 as usize];
    // Reduce f modulo u: afterwards f2 is linear, f2 = f1*x + f0.
    let mut j: i32 = 4_i32;
    loop {
        let fresh0 = j;
        j -= 1;
        if fresh0 == 0 {
            break;
        }
        f2[j as usize] = residue_sub(f2[j as usize], residue_mul(u0, f2[(j + 2_i32) as usize]));
        f2[(j + 1_i32) as usize] = residue_sub(
            f2[(j + 1_i32) as usize],
            residue_mul(u1, f2[(j + 2_i32) as usize]),
        );
        f2[(j + 2_i32) as usize] = 0_i32 as uint64_t;
    }
    let f0: uint64_t = f2[0_i32 as usize];
    let f1: uint64_t = f2[1_i32 as usize];
    let u0double: uint64_t = residue_add(u0, u0);
    // v1^2 is a root of coeff2 * w^2 - coeff1 * w + f1^2 = 0 (w = v1^2).
    let coeff2: uint64_t = residue_sub(residue_mul(u1, u1), residue_add(u0double, u0double));
    let coeff1: uint64_t = residue_sub(residue_add(f0, f0), residue_mul(f1, u1));
    if coeff2 == 0_i32 as u64 {
        if coeff1 == 0_i32 as u64 {
            if f1 == 0_i32 as u64 {
                // impossible
                panic!("bad f(), double root detected");
            }
            return 0_i32;
        }
        let mut sqr: uint64_t = residue_mul(
            residue_mul(f1, f1),
            residue_inv(residue_add(coeff1, coeff1)),
        );
        v1 = residue_sqrt(sqr);
        if v1 == 0xffffffffffffffff_u64 {
            return 0_i32;
        }
    } else {
        // Quadratic case: take either root of the discriminant.
        let mut d_0: uint64_t = residue_add(
            residue_mul(f0, f0),
            residue_mul(f1, residue_sub(residue_mul(f1, u0), residue_mul(f0, u1))),
        );
        d_0 = residue_sqrt(d_0);
        if d_0 == 0xffffffffffffffff_u64 {
            return 0_i32;
        }
        d_0 = residue_add(d_0, d_0);
        let mut inv: uint64_t = residue_inv(coeff2);
        let mut root: uint64_t = residue_mul(residue_add(coeff1, d_0), inv);
        v1 = residue_sqrt(root);
        if v1 == 0xffffffffffffffff_u64 {
            root = residue_mul(residue_sub(coeff1, d_0), inv);
            v1 = residue_sqrt(root);
            if v1 == 0xffffffffffffffff_u64 {
                return 0_i32;
            }
        }
    }
    let mut v0: uint64_t = residue_mul(
        residue_add(f1, residue_mul(u1, residue_mul(v1, v1))),
        residue_inv(residue_add(v1, v1)),
    );
    (*d).v[0_i32 as usize] = v0;
    (*d).v[1_i32 as usize] = v1;
    1_i32
}

// Schoolbook polynomial multiply-accumulate over GF(p): result += a * b,
// where result already holds a polynomial of degree resultprevdeg.
// Returns the degree of the (trimmed) result.
#[no_mangle]
unsafe extern "C" fn polynomial_mul(
    mut adeg: i32,
    mut a: *const uint64_t,
    mut bdeg: i32,
    mut b: *const uint64_t,
    mut resultprevdeg: i32,
    mut result: *mut uint64_t,
) -> i32 {
    if adeg < 0_i32 || bdeg < 0_i32 {
        return resultprevdeg;
    }
    let mut i: i32 = 0;
    let mut j: i32 = 0;
    i = resultprevdeg + 1_i32;
    while i <= adeg + bdeg {
        *result.offset(i as isize) = 0_i32 as uint64_t;
        i += 1;
    }
    resultprevdeg = i - 1_i32;
    i = 0_i32;
    while i <= adeg {
        j = 0_i32;
        while j <= bdeg {
            *result.offset((i + j) as isize) = residue_add(
                *result.offset((i + j) as isize),
                residue_mul(*a.offset(i as isize), *b.offset(j as isize)),
            );
            j += 1;
        }
        i += 1;
    }
    while resultprevdeg >= 0_i32 && *result.offset(resultprevdeg as isize) == 0_i32 as u64 {
        resultprevdeg -= 1;
    }
    resultprevdeg
}

// Divide polynomial a by the monic polynomial b; the quotient is optionally
// written out, the remainder is left in a. Returns the remainder's degree.
#[no_mangle]
unsafe extern "C" fn polynomial_div_monic(
    mut adeg: i32,
    mut a: *mut uint64_t,
    mut bdeg: i32,
    mut b: *const uint64_t,
    mut quotient: *mut uint64_t,
) -> i32 {
    let mut i: i32 = 0;
    let mut j: i32 = 0;
    i = adeg - bdeg;
    while i >= 0_i32 {
        let mut q: uint64_t = *a.offset((i + bdeg) as isize);
        if !quotient.is_null() {
            *quotient.offset(i as isize) = q;
        }
        j = 0_i32;
        while j < bdeg {
            *a.offset((i + j) as isize) = residue_sub(
                *a.offset((i + j) as isize),
                residue_mul(q, *b.offset(j as isize)),
            );
            j += 1;
        }
        *a.offset((i + j) as isize) = 0_i32 as uint64_t;
        i -= 1;
    }
    i += bdeg;
    while i >= 0_i32 && *a.offset(i as isize) == 0_i32 as u64 {
        i -= 1;
    }
    i
}

// Extended GCD for polynomials of degree <= 2 over GF(p):
// gcd = a*mult1 + b*mult2, with all degrees reported through out-parameters.
#[no_mangle]
unsafe extern "C" fn polynomial_xgcd(
    mut adeg: i32,
    mut a: *const uint64_t,
    mut bdeg: i32,
    mut b: *const uint64_t,
    mut pgcddeg: *mut i32,
    mut gcd: *mut uint64_t,
    mut pmult1deg: *mut i32,
    mut mult1: *mut uint64_t,
    mut pmult2deg: *mut i32,
    mut mult2: *mut uint64_t,
) {
    let mut sdeg: i32 = -1_i32;
    let mut s: [uint64_t; 3] = [0_i32 as uint64_t, 0_i32 as uint64_t, 0_i32 as uint64_t];
    let mut mult1deg: i32 = 0_i32;
    *mult1.offset(0_i32 as isize) = 1_i32 as uint64_t;
    *mult1.offset(1_i32 as isize) = 0_i32 as uint64_t;
    *mult1.offset(2_i32 as isize) = 0_i32 as uint64_t;
    let mut tdeg: i32 = 0_i32;
    let mut t: [uint64_t; 3] = [1_i32 as uint64_t, 0_i32 as uint64_t, 0_i32 as uint64_t];
    let mut mult2deg: i32 = -1_i32;
    *mult2.offset(0_i32 as isize) = 0_i32 as uint64_t;
    *mult2.offset(1_i32 as isize) = 0_i32 as uint64_t;
    *mult2.offset(2_i32 as isize) = 0_i32 as uint64_t;
    let mut rdeg: i32 = bdeg;
    let mut r: [uint64_t; 3] = [
        *b.offset(0_i32 as isize),
        *b.offset(1_i32 as isize),
        *b.offset(2_i32 as isize),
    ];
    let mut gcddeg: i32 = adeg;
    *gcd.offset(0_i32 as isize) = *a.offset(0_i32 as isize);
    *gcd.offset(1_i32 as isize) = *a.offset(1_i32 as isize);
    *gcd.offset(2_i32 as isize) = *a.offset(2_i32 as isize);
    // Invariants: r = s*a + t*b and gcd = mult1*a + mult2*b.
    while rdeg >= 0_i32 {
        if rdeg > gcddeg {
            // Swap (r, s, t) with (gcd, mult1, mult2).
            let mut tmp: u32 = 0;
            let mut tmpi: i32 = 0;
            tmp = rdeg as u32;
            rdeg = gcddeg;
            gcddeg = tmp as i32;
            tmpi = sdeg;
            sdeg = mult1deg;
            mult1deg = tmpi;
            tmpi = tdeg;
            tdeg = mult2deg;
            mult2deg = tmpi;
            let mut tmp2: uint64_t = 0;
            tmp2 = r[0_i32 as usize];
            r[0_i32 as usize] = *gcd.offset(0_i32 as isize);
            *gcd.offset(0_i32 as isize) = tmp2;
            tmp2 = r[1_i32 as usize];
            r[1_i32 as usize] = *gcd.offset(1_i32 as isize);
            *gcd.offset(1_i32 as isize) = tmp2;
            tmp2 = r[2_i32 as usize];
            r[2_i32 as usize] = *gcd.offset(2_i32 as isize);
            *gcd.offset(2_i32 as isize) = tmp2;
            tmp2 = s[0_i32 as usize];
            s[0_i32 as usize] = *mult1.offset(0_i32 as isize);
            *mult1.offset(0_i32 as isize) = tmp2;
            tmp2 = s[1_i32 as usize];
            s[1_i32 as usize] = *mult1.offset(1_i32 as isize);
            *mult1.offset(1_i32 as isize) = tmp2;
            tmp2 = s[2_i32 as usize];
            s[2_i32 as usize] = *mult1.offset(2_i32 as isize);
            *mult1.offset(2_i32 as isize) = tmp2;
            tmp2 = t[0_i32 as usize];
            t[0_i32 as usize] = *mult2.offset(0_i32 as isize);
            *mult2.offset(0_i32 as isize) = tmp2;
            tmp2 = t[1_i32 as usize];
            t[1_i32 as usize] = *mult2.offset(1_i32 as isize);
            *mult2.offset(1_i32 as isize) = tmp2;
            tmp2 = t[2_i32 as usize];
            t[2_i32 as usize] = *mult2.offset(2_i32 as isize);
            *mult2.offset(2_i32 as isize) = tmp2;
        } else {
            // Cancel the leading coefficient of gcd with a multiple of r.
            let mut delta: i32 = gcddeg - rdeg;
            let mut mult: uint64_t =
                residue_mul(*gcd.offset(gcddeg as isize), residue_inv(r[rdeg as usize]));
            let mut i: i32 = 0_i32;
            while i <= rdeg {
                *gcd.offset((i + delta) as isize) = residue_sub(
                    *gcd.offset((i + delta) as isize),
                    residue_mul(mult, r[i as usize]),
                );
                i += 1;
            }
            while gcddeg >= 0_i32 && *gcd.offset(gcddeg as isize) == 0_i32 as u64 {
                gcddeg -= 1;
            }
            let mut i_0: i32 = 0_i32;
            while i_0 <= sdeg {
                *mult1.offset((i_0 + delta) as isize) = residue_sub(
                    *mult1.offset((i_0 + delta) as isize),
                    residue_mul(mult, s[i_0 as usize]),
                );
                i_0 += 1;
            }
            if mult1deg < sdeg + delta {
                mult1deg = sdeg + delta;
            }
            while mult1deg >= 0_i32 && *mult1.offset(mult1deg as isize) == 0_i32 as u64 {
                mult1deg -= 1;
            }
            let mut i_1: i32 = 0_i32;
            while i_1 <= tdeg {
                *mult2.offset((i_1 + delta) as isize) = residue_sub(
                    *mult2.offset((i_1 + delta) as isize),
                    residue_mul(mult, t[i_1 as usize]),
                );
                i_1 += 1;
            }
            if mult2deg < tdeg + delta {
                mult2deg = tdeg + delta;
            }
            while mult2deg >= 0_i32 && *mult2.offset(mult2deg as isize) == 0_i32 as u64 {
                mult2deg -= 1;
            }
        }
    }
    *pgcddeg = gcddeg;
    *pmult1deg = mult1deg;
    *pmult2deg = mult2deg;
}

// Unpack a TDivisor into explicit u- and v-polynomial coefficient arrays
// (all-ones coordinates mark lower-degree divisors). Returns deg(u).
#[no_mangle]
unsafe extern "C" fn u2poly(
    mut src: *const TDivisor,
    mut polyu: *mut uint64_t,
    mut polyv: *mut uint64_t,
) -> i32 {
    if (*src).u[1_i32 as usize] != 0xffffffffffffffff_u64 {
        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize];
        *polyu.offset(1_i32 as isize) = (*src).u[1_i32 as usize];
        *polyu.offset(2_i32 as isize) = 1_i32 as uint64_t;
        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize];
        *polyv.offset(1_i32 as isize) = (*src).v[1_i32 as usize];
        return 2_i32;
    }
    if (*src).u[0_i32 as usize] != 0xffffffffffffffff_u64 {
        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize];
        *polyu.offset(1_i32 as isize) = 1_i32 as uint64_t;
        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize];
        *polyv.offset(1_i32 as isize) = 0_i32 as uint64_t;
        return 1_i32;
    }
    *polyu.offset(0_i32 as isize) = 1_i32 as uint64_t;
    *polyv.offset(0_i32 as isize) = 0_i32 as uint64_t;
    *polyv.offset(1_i32 as isize) = 0_i32 as uint64_t;
    0_i32
}

// Cantor's algorithm: add two divisors on the Jacobian of y^2 = f(x) and
// reduce the result back to a divisor of degree <= 2.
#[no_mangle]
unsafe extern "C" fn divisor_add(
    mut src1: *const TDivisor,
    mut src2: *const TDivisor,
    mut dst: *mut TDivisor,
) {
    let mut u1: [uint64_t; 3] = [0; 3];
    let mut u2: [uint64_t; 3] = [0; 3];
    let mut v1: [uint64_t; 2] = [0; 2];
    let mut v2: [uint64_t; 2] = [0; 2];
    let mut u1deg: i32 = u2poly(src1, u1.as_mut_ptr(), v1.as_mut_ptr());
    let mut u2deg: i32 = u2poly(src2, u2.as_mut_ptr(), v2.as_mut_ptr());
    // Composition step: d1 = gcd(u1, u2) = e1*u1 + e2*u2.
    let mut d1deg: i32 = 0;
    let mut e1deg: i32 = 0;
    let mut e2deg: i32 = 0;
    let mut d1: [uint64_t; 3] = [0; 3];
    let mut e1: [uint64_t; 3] = [0; 3];
    let mut e2: [uint64_t; 3] = [0; 3];
    polynomial_xgcd(
        u1deg,
        u1.as_mut_ptr() as *const uint64_t,
        u2deg,
        u2.as_mut_ptr() as *const uint64_t,
        &mut d1deg,
        d1.as_mut_ptr(),
        &mut e1deg,
        e1.as_mut_ptr(),
        &mut e2deg,
        e2.as_mut_ptr(),
    );
    let mut b: [uint64_t; 3] = [
        residue_add(v1[0_i32 as usize], v2[0_i32 as usize]),
        residue_add(v1[1_i32 as usize], v2[1_i32 as usize]),
        0_i32 as uint64_t,
    ];
    let mut bdeg: i32 = if b[1_i32 as usize] == 0_i32 as u64 {
        if b[0_i32 as usize] == 0_i32 as u64 {
            -1_i32
        } else {
            0_i32
        }
    } else {
        1_i32
    };
    // d = gcd(d1, v1 + v2) = c1*d1 + c2*(v1 + v2).
    let mut ddeg: i32 = 0;
    let mut c1deg: i32 = 0;
    let mut c2deg: i32 = 0;
    let mut d: [uint64_t; 3] = [0; 3];
    let mut c1: [uint64_t; 3] = [0; 3];
    let mut c2: [uint64_t; 3] = [0; 3];
    polynomial_xgcd(
        d1deg,
        d1.as_mut_ptr() as *const uint64_t,
        bdeg,
        b.as_mut_ptr() as *const uint64_t,
        &mut ddeg,
        d.as_mut_ptr(),
        &mut c1deg,
        c1.as_mut_ptr(),
        &mut c2deg,
        c2.as_mut_ptr(),
    );
    // Make d monic and scale c1, c2 to match.
    let mut dmult: uint64_t = residue_inv(d[ddeg as usize]);
    let mut i: i32 = 0;
    i = 0_i32;
    while i < ddeg {
        d[i as usize] = residue_mul(d[i as usize], dmult);
        i += 1;
    }
    d[i as usize] = 1_i32 as uint64_t;
    i = 0_i32;
    while i <= c1deg {
        c1[i as usize] = residue_mul(c1[i as usize], dmult);
        i += 1;
    }
    i = 0_i32;
    while i <= c2deg {
        c2[i as usize] = residue_mul(c2[i as usize], dmult);
        i += 1;
    }
    let mut u: [uint64_t; 5] = [0; 5];
    let mut udeg: i32 = polynomial_mul(
        u1deg,
        u1.as_mut_ptr() as *const uint64_t,
        u2deg,
        u2.as_mut_ptr() as *const uint64_t,
        -1_i32,
        u.as_mut_ptr(),
    );
    // Compose the v-polynomial for the combined divisor.
    let mut v: [uint64_t; 7] = [0; 7];
    let mut tmp: [uint64_t; 7] = [0; 7];
    let mut vdeg: i32 = 0;
    let mut tmpdeg: i32 = 0;
    v[0_i32 as usize] = residue_sub(v2[0_i32 as usize], v1[0_i32 as usize]);
    v[1_i32 as usize] = residue_sub(v2[1_i32 as usize], v1[1_i32 as usize]);
    tmpdeg = polynomial_mul(
        e1deg,
        e1.as_mut_ptr() as *const uint64_t,
        1_i32,
        v.as_mut_ptr() as *const uint64_t,
        -1_i32,
        tmp.as_mut_ptr(),
    );
    vdeg = polynomial_mul(
        u1deg,
        u1.as_mut_ptr() as *const uint64_t,
        tmpdeg,
        tmp.as_mut_ptr() as *const uint64_t,
        -1_i32,
        v.as_mut_ptr(),
    );
    vdeg = polynomial_mul(
        d1deg,
        d1.as_mut_ptr() as *const uint64_t,
        1_i32,
        v1.as_mut_ptr() as *const uint64_t,
        vdeg,
        v.as_mut_ptr(),
    );
    i = 0_i32;
    while i <= vdeg {
        v[i as usize] = residue_mul(v[i as usize], c1[0_i32 as usize]);
        i += 1;
    }
    memcpy(
        tmp.as_mut_ptr() as *mut libc::c_void,
        f.as_ptr() as *const libc::c_void,
        (6_i32 as u64).wrapping_mul(::std::mem::size_of::<uint64_t>() as u64),
    );
    tmpdeg = 5_i32;
    tmpdeg = polynomial_mul(
        1_i32,
        v1.as_mut_ptr() as *const uint64_t,
        1_i32,
        v2.as_mut_ptr() as *const uint64_t,
        tmpdeg,
        tmp.as_mut_ptr(),
    );
    vdeg = polynomial_mul(
        c2deg,
        c2.as_mut_ptr() as *const uint64_t,
        tmpdeg,
        tmp.as_mut_ptr() as *const uint64_t,
        vdeg,
        v.as_mut_ptr(),
    );
    if ddeg > 0_i32 {
        // d^2 divides u; divide it out twice and reduce v accordingly.
        let mut udiv: [uint64_t; 5] = [0; 5];
        polynomial_div_monic(
            udeg,
            u.as_mut_ptr(),
            ddeg,
            d.as_mut_ptr() as *const uint64_t,
            udiv.as_mut_ptr(),
        );
        udeg -= ddeg;
        polynomial_div_monic(
            udeg,
            udiv.as_mut_ptr(),
            ddeg,
            d.as_mut_ptr() as *const uint64_t,
            u.as_mut_ptr(),
        );
        udeg -= ddeg;
        if vdeg >= 0_i32 {
            polynomial_div_monic(
                vdeg,
                v.as_mut_ptr(),
                ddeg,
                d.as_mut_ptr() as *const uint64_t,
                udiv.as_mut_ptr(),
            );
            vdeg -= ddeg;
            memcpy(
                v.as_mut_ptr() as *mut libc::c_void,
                udiv.as_mut_ptr() as *const libc::c_void,
                ((vdeg + 1_i32) as u64).wrapping_mul(::std::mem::size_of::<uint64_t>() as u64),
            );
        }
    }
    vdeg = polynomial_div_monic(
        vdeg,
        v.as_mut_ptr(),
        udeg,
        u.as_mut_ptr() as *const uint64_t,
        std::ptr::null_mut::<uint64_t>(),
    );
    // Reduction step: replace u with (f - v^2) / u until deg(u) <= 2.
    while udeg > 2_i32 {
        tmpdeg = polynomial_mul(
            vdeg,
            v.as_mut_ptr() as *const uint64_t,
            vdeg,
            v.as_mut_ptr() as *const uint64_t,
            -1_i32,
            tmp.as_mut_ptr(),
        );
        i = 0_i32;
        while i <= tmpdeg && i <= 5_i32 {
            tmp[i as usize] = residue_sub(f[i as usize], tmp[i as usize]);
            i += 1;
        }
        while i <= tmpdeg {
            tmp[i as usize] = residue_sub(0_i32 as uint64_t, tmp[i as usize]);
            i += 1;
        }
        while i <= 5_i32 {
            tmp[i as usize] = f[i as usize];
            i += 1;
        }
        tmpdeg = i - 1_i32;
        let mut udiv_0: [uint64_t; 5] = [0; 5];
        polynomial_div_monic(
            tmpdeg,
            tmp.as_mut_ptr(),
            udeg,
            u.as_mut_ptr() as *const uint64_t,
            udiv_0.as_mut_ptr(),
        );
        udeg = tmpdeg - udeg;
        // Make u monic again.
        let mut mult: uint64_t = residue_inv(udiv_0[udeg as usize]);
        i = 0_i32;
        while i < udeg {
            u[i as usize] = residue_mul(udiv_0[i as usize], mult);
            i += 1;
        }
        u[i as usize] = 1_i32 as uint64_t;
        // v = -v mod u.
        i = 0_i32;
        while i <= vdeg {
            v[i as usize] = residue_sub(0_i32 as uint64_t, v[i as usize]);
            i += 1;
        }
        vdeg = polynomial_div_monic(
            vdeg,
            v.as_mut_ptr(),
            udeg,
            u.as_mut_ptr() as *const uint64_t,
            std::ptr::null_mut::<uint64_t>(),
        );
    }
    // Pack the result, using all-ones to mark missing coordinates.
    if udeg == 2_i32 {
        (*dst).u[0_i32 as usize] = u[0_i32 as usize];
        (*dst).u[1_i32 as usize] = u[1_i32 as usize];
        (*dst).v[0_i32 as usize] = if vdeg >= 0_i32 { v[0_i32 as usize] } else { 0_i32 as u64 };
        (*dst).v[1_i32 as usize] = if vdeg >= 1_i32 { v[1_i32 as usize] } else { 0_i32 as u64 };
    } else if udeg == 1_i32 {
        (*dst).u[0_i32 as usize] = u[0_i32 as usize];
        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).v[0_i32 as usize] = if vdeg >= 0_i32 { v[0_i32 as usize] } else { 0_i32 as u64 };
        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64;
    } else {
        (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).v[0_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64;
    }
}

// Scalar multiplication of a divisor by a 64-bit multiplier, double-and-add.
#[no_mangle]
unsafe extern "C" fn divisor_mul(
    mut src: *const TDivisor,
    mut mult: uint64_t,
    mut dst: *mut TDivisor,
) {
    if mult == 0_i32 as u64 {
        (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).v[0_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64;
        return;
    }
    let mut cur: TDivisor = *src;
    while mult & 1_i32 as u64 == 0 {
        divisor_add(&cur, &cur, &mut cur);
        mult >>= 1_i32;
    }
    *dst = cur;
    loop {
        mult >>= 1_i32;
        if mult == 0_i32 as u64 {
            break;
        }
        divisor_add(&cur, &cur, &mut cur);
        if mult & 1_i32 as u64 != 0 {
            divisor_add(dst, &cur, dst);
        }
    }
}

// Scalar multiplication by a 128-bit multiplier (mult_hi:mult_lo),
// double-and-add.
#[no_mangle]
unsafe extern "C" fn divisor_mul128(
    mut src: *const TDivisor,
    mut mult_lo: uint64_t,
    mut mult_hi: uint64_t,
    mut dst: *mut TDivisor,
) {
    if mult_lo == 0_i32 as u64 && mult_hi == 0_i32 as u64 {
        (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).v[0_i32 as usize] = 0xffffffffffffffff_u64;
        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64;
        return;
    }
    let mut cur: TDivisor = *src;
    while mult_lo & 1_i32 as u64 == 0 {
        divisor_add(&cur, &cur, &mut cur);
        // 128-bit right shift of (hi, lo) by one.
        mult_lo >>= 1_i32;
        if mult_hi & 1_i32 as u64 != 0 {
            mult_lo |= 1_u64 << 63_i32;
        }
        mult_hi >>= 1_i32;
    }
    *dst = cur;
    loop {
        mult_lo >>= 1_i32;
        if mult_hi & 1_i32 as u64 != 0 {
            mult_lo |= 1_u64 << 63_i32;
        }
        mult_hi >>= 1_i32;
        if mult_lo == 0_i32 as u64 && mult_hi == 0_i32 as u64 {
            break;
        }
        divisor_add(&cur, &cur, &mut cur);
        if mult_lo & 1_i32 as u64 != 0 {
            divisor_add(dst, &cur, dst);
        }
    }
}

// 32-bit rotate left (shift must stay in 1..=31).
#[no_mangle]
unsafe extern "C" fn rol(mut x: u32, mut shift: i32) -> u32 {
    x << shift | x >> (32_i32 - shift)
}

// SHA-1 compression of a single, already padded 64-byte block; writes the
// 20-byte digest to output.
#[no_mangle]
unsafe extern "C" fn sha1_single_block(
    mut input: *mut libc::c_uchar,
    mut output: *mut libc::c_uchar,
) {
    let mut a: u32 = 0;
    let mut b: u32 = 0;
    let mut c: u32 = 0;
    let mut d: u32 = 0;
    let mut e: u32 = 0;
    a = 0x67452301_i32 as u32;
    b = 0xefcdab89_u32;
    c = 0x98badcfe_u32;
    d = 0x10325476_i32 as u32;
    e = 0xc3d2e1f0_u32;
    // Message schedule: 16 big-endian words, expanded to 80.
    let mut w: [u32; 80] = [0; 80];
    let mut i: size_t = 0;
    i = 0_i32 as size_t;
    while i < 16_i32 as u64 {
        w[i as usize] = ((*input.offset((4_i32 as u64).wrapping_mul(i) as isize) as i32) << 24_i32
            | (*input.offset((4_i32 as u64).wrapping_mul(i).wrapping_add(1_i32 as u64) as isize)
                as i32)
                << 16_i32
            | (*input.offset((4_i32 as u64).wrapping_mul(i).wrapping_add(2_i32 as u64) as isize)
                as i32)
                << 8_i32
            | *input.offset((4_i32 as u64).wrapping_mul(i).wrapping_add(3_i32 as u64) as isize)
                as i32) as u32;
        i = i.wrapping_add(1);
    }
    i = 16_i32 as size_t;
    while i < 80_i32 as u64 {
        w[i as usize] = rol(
            w[i.wrapping_sub(3_i32 as u64) as usize]
                ^ w[i.wrapping_sub(8_i32 as u64) as usize]
                ^ w[i.wrapping_sub(14_i32 as u64) as usize]
                ^ w[i.wrapping_sub(16_i32 as u64) as usize],
            1_i32,
        );
        i = i.wrapping_add(1);
    }
    // Rounds 0-19: Ch(b, c, d).
    i = 0_i32 as size_t;
    while i < 20_i32 as u64 {
        let mut tmp: u32 = (rol(a, 5_i32))
            .wrapping_add(b & c | !b & d)
            .wrapping_add(e)
            .wrapping_add(w[i as usize])
            .wrapping_add(0x5a827999_i32 as u32);
        e = d;
        d = c;
        c = rol(b, 30_i32);
        b = a;
        a = tmp;
        i = i.wrapping_add(1);
    }
    // Rounds 20-39: Parity(b, c, d).
    i = 20_i32 as size_t;
    while i < 40_i32 as u64 {
        let mut tmp_0: u32 = (rol(a, 5_i32))
            .wrapping_add(b ^ c ^ d)
            .wrapping_add(e)
            .wrapping_add(w[i as usize])
            .wrapping_add(0x6ed9eba1_i32 as u32);
        e = d;
        d = c;
        c = rol(b, 30_i32);
        b = a;
        a = tmp_0;
        i = i.wrapping_add(1);
    }
    // Rounds 40-59: Maj(b, c, d).
    i = 40_i32 as size_t;
    while i < 60_i32 as u64 {
        let mut tmp_1: u32 = (rol(a, 5_i32))
            .wrapping_add(b & c | b & d | c & d)
            .wrapping_add(e)
            .wrapping_add(w[i as usize])
            .wrapping_add(0x8f1bbcdc_u32);
        e = d;
        d = c;
        c = rol(b, 30_i32);
        b = a;
        a = tmp_1;
        i = i.wrapping_add(1);
    }
    // Rounds 60-79: Parity(b, c, d).
    i = 60_i32 as size_t;
    while i < 80_i32 as u64 {
        let mut tmp_2: u32 = (rol(a, 5_i32))
            .wrapping_add(b ^ c ^ d)
            .wrapping_add(e)
            .wrapping_add(w[i as usize])
            .wrapping_add(0xca62c1d6_u32);
        e = d;
        d = c;
        c = rol(b, 30_i32);
        b = a;
        a = tmp_2;
        i = i.wrapping_add(1);
    }
    a = a.wrapping_add(0x67452301_i32 as u32);
    b = b.wrapping_add(0xefcdab89_u32);
    c = c.wrapping_add(0x98badcfe_u32);
    d = d.wrapping_add(0x10325476_i32 as u32);
    e = e.wrapping_add(0xc3d2e1f0_u32);
    // Emit the digest big-endian.
    *output.offset(0_i32 as isize) = (a >> 24_i32) as libc::c_uchar;
    *output.offset(1_i32 as isize) = (a >> 16_i32) as libc::c_uchar;
    *output.offset(2_i32 as isize) = (a >> 8_i32) as libc::c_uchar;
    *output.offset(3_i32 as isize) = a as libc::c_uchar;
    *output.offset(4_i32 as isize) = (b >> 24_i32) as libc::c_uchar;
    *output.offset(5_i32 as isize) = (b >> 16_i32) as libc::c_uchar;
    *output.offset(6_i32 as isize) = (b >> 8_i32) as libc::c_uchar;
    *output.offset(7_i32 as isize) = b as libc::c_uchar;
    *output.offset(8_i32 as isize) = (c >> 24_i32) as libc::c_uchar;
    *output.offset(9_i32 as isize) = (c >> 16_i32) as libc::c_uchar;
    *output.offset(10_i32 as isize) = (c >> 8_i32) as libc::c_uchar;
    *output.offset(11_i32 as isize) = c as libc::c_uchar;
    *output.offset(12_i32 as isize) = (d >> 24_i32) as libc::c_uchar;
    *output.offset(13_i32 as isize) = (d >> 16_i32) as libc::c_uchar;
    *output.offset(14_i32 as isize) = (d >> 8_i32) as libc::c_uchar;
    *output.offset(15_i32 as isize) = d as libc::c_uchar;
    *output.offset(16_i32 as isize) = (e >> 24_i32) as libc::c_uchar;
    *output.offset(17_i32 as isize) = (e >> 16_i32) as libc::c_uchar;
    *output.offset(18_i32 as isize) = (e >> 8_i32) as libc::c_uchar;
    *output.offset(19_i32 as isize) = e as libc::c_uchar;
}
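
// Sanity sketch (an addition, not part of the original translation): feed the
// pre-padded one-block message "abc" through sha1_single_block and compare
// against the published FIPS 180-1 test vector.
#[cfg(test)]
mod sha1_tests {
    #[test]
    fn abc_test_vector() {
        let mut block = [0u8; 64];
        block[..3].copy_from_slice(b"abc");
        block[3] = 0x80; // padding marker bit
        block[63] = 24; // message length in bits, big-endian
        let mut digest = [0u8; 20];
        unsafe { super::sha1_single_block(block.as_mut_ptr(), digest.as_mut_ptr()) };
        assert_eq!(
            digest,
            [
                0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71,
                0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d,
            ]
        );
    }
}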

// One half of a 4-round Feistel network: each round hashes the second half
// of the buffer together with the key via SHA-1, XORs the result into the
// first half, and swaps the halves.
#[no_mangle]
unsafe extern "C" fn Mix(
    mut buffer: *mut libc::c_uchar,
    mut bufSize: size_t,
    mut key: *const libc::c_uchar,
    mut keySize: size_t,
) {
    let mut sha1_input: [libc::c_uchar; 64] = [0; 64];
    let mut sha1_result: [libc::c_uchar; 20] = [0; 20];
    let mut half: size_t = bufSize.wrapping_div(2_i32 as u64);
    let mut external_counter: i32 = 0;
    external_counter = 0_i32;
    while external_counter < 4_i32 {
        // Build the padded SHA-1 block: second half of the buffer, the key,
        // the 0x80 padding marker, and the bit length in the last two bytes.
        memset(
            sha1_input.as_mut_ptr() as *mut libc::c_void,
            0_i32,
            ::std::mem::size_of::<[libc::c_uchar; 64]>() as u64,
        );
        memcpy(
            sha1_input.as_mut_ptr() as *mut libc::c_void,
            buffer.offset(half as isize) as *const libc::c_void,
            half,
        );
        memcpy(
            sha1_input.as_mut_ptr().offset(half as isize) as *mut libc::c_void,
            key as *const libc::c_void,
            keySize,
        );
        sha1_input[half.wrapping_add(keySize) as usize] = 0x80_i32 as libc::c_uchar;
        sha1_input[(::std::mem::size_of::<[libc::c_uchar; 64]>() as u64).wrapping_sub(1_i32 as u64)
            as usize] = half.wrapping_add(keySize).wrapping_mul(8_i32 as u64) as libc::c_uchar;
        sha1_input[(::std::mem::size_of::<[libc::c_uchar; 64]>() as u64).wrapping_sub(2_i32 as u64)
            as usize] = half
            .wrapping_add(keySize)
            .wrapping_mul(8_i32 as u64)
            .wrapping_div(0x100_i32 as u64) as libc::c_uchar;
        sha1_single_block(sha1_input.as_mut_ptr(), sha1_result.as_mut_ptr());
        // For an odd half, realign the tail of the hash output.
        let mut i: size_t = 0;
        i = half & !3_i32 as u64;
        while i < half {
            sha1_result[i as usize] = sha1_result[i
                .wrapping_add(4_i32 as u64)
                .wrapping_sub(half & 3_i32 as u64)
                as usize];
            i = i.wrapping_add(1);
        }
        // Feistel swap: new first half = old second half,
        // new second half = old first half XOR hash.
        i = 0_i32 as size_t;
        while i < half {
            let mut tmp: libc::c_uchar = *buffer.offset(i.wrapping_add(half) as isize);
            *buffer.offset(i.wrapping_add(half) as isize) = (*buffer.offset(i as isize) as i32
                ^ sha1_result[i as usize] as i32)
                as libc::c_uchar;
            *buffer.offset(i as isize) = tmp;
            i = i.wrapping_add(1);
        }
        external_counter += 1;
    }
}

// Inverse of Mix: runs the same four Feistel rounds in reverse, hashing the
// first half of the buffer instead of the second.
#[no_mangle]
unsafe extern "C" fn Unmix(
    mut buffer: *mut libc::c_uchar,
    mut bufSize: size_t,
    mut key: *const libc::c_uchar,
    mut keySize: size_t,
) {
    let mut sha1_input: [libc::c_uchar; 64] = [0; 64];
    let mut sha1_result: [libc::c_uchar; 20] = [0; 20];
    let mut half: size_t = bufSize.wrapping_div(2_i32 as u64);
    let mut external_counter: i32 = 0;
    external_counter = 0_i32;
    while external_counter < 4_i32 {
        memset(
            sha1_input.as_mut_ptr() as *mut libc::c_void,
            0_i32,
            ::std::mem::size_of::<[libc::c_uchar; 64]>() as u64,
        );
        memcpy(
            sha1_input.as_mut_ptr() as *mut libc::c_void,
            buffer as *const libc::c_void,
            half,
        );
        memcpy(
            sha1_input.as_mut_ptr().offset(half as isize) as *mut libc::c_void,
            key as *const libc::c_void,
            keySize,
        );
        sha1_input[half.wrapping_add(keySize) as usize] = 0x80_i32 as libc::c_uchar;
        sha1_input[(::std::mem::size_of::<[libc::c_uchar; 64]>() as u64).wrapping_sub(1_i32 as u64)
            as usize] = half.wrapping_add(keySize).wrapping_mul(8_i32 as u64) as libc::c_uchar;
        sha1_input[(::std::mem::size_of::<[libc::c_uchar; 64]>() as u64).wrapping_sub(2_i32 as u64)
            as usize] = half
            .wrapping_add(keySize)
            .wrapping_mul(8_i32 as u64)
            .wrapping_div(0x100_i32 as u64) as libc::c_uchar;
        sha1_single_block(sha1_input.as_mut_ptr(), sha1_result.as_mut_ptr());
        let mut i: size_t = 0;
        i = half & !3_i32 as u64;
        while i < half {
            sha1_result[i as usize] = sha1_result[i
                .wrapping_add(4_i32 as u64)
                .wrapping_sub(half & 3_i32 as u64)
                as usize];
            i = i.wrapping_add(1);
        }
        // Inverse Feistel swap: new second half = old first half,
        // new first half = old second half XOR hash.
        i = 0_i32 as size_t;
        while i < half {
            let mut tmp: libc::c_uchar = *buffer.offset(i as isize);
            *buffer.offset(i as isize) = (*buffer.offset(i.wrapping_add(half) as isize) as i32
                ^ sha1_result[i as usize] as i32)
                as libc::c_uchar;
            *buffer.offset(i.wrapping_add(half) as isize) = tmp;
            i = i.wrapping_add(1);
        }
        external_counter += 1;
    }
}
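
// Sanity sketch (an addition, not part of the original translation): Unmix
// must exactly invert Mix for the 14-byte buffer and 16-byte key shape that
// Generate uses; the key and buffer bytes below are arbitrary.
#[cfg(test)]
mod feistel_tests {
    #[test]
    fn unmix_inverts_mix() {
        let key: [u8; 16] = [7; 16];
        let mut buf = *b"0123456789abcd";
        let original = buf;
        unsafe {
            super::Mix(buf.as_mut_ptr(), 14, key.as_ptr(), 16);
            super::Unmix(buf.as_mut_ptr(), 14, key.as_ptr(), 16);
        }
        assert_eq!(buf, original);
    }
}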

// Decode an installation ID string into a confirmation ID string. Returns 0
// on success or an error code 1..=6 (mapped to ConfirmationIdError below).
#[no_mangle]
unsafe extern "C" fn Generate(
    mut installation_id_str: *const libc::c_char,
    mut confirmation_id: *mut libc::c_char,
) -> i32 {
    let mut installation_id: [libc::c_uchar; 19] = [0; 19];
    let mut installation_id_len: size_t = 0_i32 as size_t;
    let mut p: *const libc::c_char = installation_id_str;
    let mut count: size_t = 0_i32 as size_t;
    let mut totalCount: size_t = 0_i32 as size_t;
    let mut check: u32 = 0_i32 as u32;
    let mut i: size_t = 0;
    // Parse the decimal digits (ignoring spaces and dashes), verify the
    // check digit ending each 6-digit group, and accumulate the value into
    // installation_id as a little-endian big integer.
    while *p != 0 {
        if !(*p as i32 == ' ' as i32 || *p as i32 == '-' as i32) {
            let mut d: i32 = *p as i32 - '0' as i32;
            if !(0_i32..=9_i32).contains(&d) {
                return 3_i32;
            }
            if count == 5_i32 as u64 || *p.offset(1_i32 as isize) as i32 == 0_i32 {
                if count == 0 {
                    return if totalCount == 45_i32 as u64 { 2_i32 } else { 1_i32 };
                }
                if d as u32 != check.wrapping_rem(7_i32 as u32) {
                    return if count < 5_i32 as u64 { 1_i32 } else { 4_i32 };
                }
                check = 0_i32 as u32;
                count = 0_i32 as size_t;
            } else {
                // Check digit is the weighted digit sum mod 7 (odd positions
                // doubled).
                check = check.wrapping_add(
                    (if count.wrapping_rem(2_i32 as u64) != 0 { d * 2_i32 } else { d }) as u32,
                );
                count = count.wrapping_add(1);
                totalCount = totalCount.wrapping_add(1);
                if totalCount > 45_i32 as u64 {
                    return 2_i32;
                }
                // installation_id = installation_id * 10 + d.
                let mut carry: libc::c_uchar = d as libc::c_uchar;
                i = 0_i32 as size_t;
                while i < installation_id_len {
                    let mut x: u32 =
                        (installation_id[i as usize] as i32 * 10_i32 + carry as i32) as u32;
                    installation_id[i as usize] = (x & 0xff_i32 as u32) as libc::c_uchar;
                    carry = (x >> 8_i32) as libc::c_uchar;
                    i = i.wrapping_add(1);
                }
                if carry != 0 {
                    let fresh1 = installation_id_len;
                    installation_id_len = installation_id_len.wrapping_add(1);
                    installation_id[fresh1 as usize] = carry;
                }
            }
        }
        p = p.offset(1);
    }
    if totalCount != 41_i32 as u64 && totalCount < 45_i32 as u64 {
        return 1_i32;
    }
    while installation_id_len < ::std::mem::size_of::<[libc::c_uchar; 19]>() as u64 {
        installation_id[installation_id_len as usize] = 0_i32 as libc::c_uchar;
        installation_id_len = installation_id_len.wrapping_add(1);
    }
    // Decrypt the packed installation ID with the fixed Feistel key.
    static iid_key: [libc::c_uchar; 4] = [
        0x6a_i32 as libc::c_uchar,
        0xc8_i32 as libc::c_uchar,
        0x5e_i32 as libc::c_uchar,
        0xd4_i32 as libc::c_uchar,
    ];
    Unmix(
        installation_id.as_mut_ptr(),
        (if totalCount == 41_i32 as u64 { 17_i32 } else { 19_i32 }) as size_t,
        iid_key.as_ptr(),
        4_i32 as size_t,
    );
    if installation_id[18_i32 as usize] as i32 >= 0x10_i32 {
        return 5_i32;
    }
    let mut parsed: C2RustUnnamed_4 = C2RustUnnamed_4 {
        HardwareID: 0,
        ProductIDLow: 0,
        ProductIDHigh: 0,
        KeySHA1: 0,
    };
    memcpy(
        &mut parsed as *mut C2RustUnnamed_4 as *mut libc::c_void,
        installation_id.as_mut_ptr() as *const libc::c_void,
        ::std::mem::size_of::<C2RustUnnamed_4>() as u64,
    );
    // Unpack the product ID fields from their bit-packed layout.
    let mut productId1: u32 = (parsed.ProductIDLow & ((1_i32 << 17_i32) - 1_i32) as u64) as u32;
    let mut productId2: u32 =
        (parsed.ProductIDLow >> 17_i32 & ((1_i32 << 10_i32) - 1_i32) as u64) as u32;
    let mut productId3: u32 =
        (parsed.ProductIDLow >> 27_i32 & ((1_i32 << 25_i32) - 1_i32) as u64) as u32;
    let mut version: u32 = (parsed.ProductIDLow >> 52_i32 & 7_i32 as u64) as u32;
    let mut productId4: u32 =
        (parsed.ProductIDLow >> 55_i32 | ((parsed.ProductIDHigh as i32) << 9_i32) as u64) as u32;
    if version != (if totalCount == 41_i32 as u64 { 4_i32 } else { 5_i32 }) as u32 {
        return 5_i32;
    }
    // Assemble the 16-byte Feistel key from the hardware ID and the
    // rearranged product ID.
    let mut keybuf: [libc::c_uchar; 16] = [0; 16];
    let mut hardware_id = parsed.HardwareID;
    memcpy(
        keybuf.as_mut_ptr() as *mut libc::c_void,
        &mut hardware_id as *mut uint64_t as *const libc::c_void,
        8_i32 as u64,
    );
    let mut productIdMixed: uint64_t = (productId1 as uint64_t) << 41_i32
        | (productId2 as uint64_t) << 58_i32
        | (productId3 as uint64_t) << 17_i32
        | productId4 as u64;
    memcpy(
        keybuf.as_mut_ptr().offset(8_i32 as isize) as *mut libc::c_void,
        &mut productIdMixed as *mut uint64_t as *const libc::c_void,
        8_i32 as u64,
    );
    // Hash attempt counters with the key until the result yields a valid
    // divisor on the curve.
    let mut d_0: TDivisor = TDivisor { u: [0; 2], v: [0; 2] };
    let mut attempt: libc::c_uchar = 0;
    attempt = 0_i32 as libc::c_uchar;
    while attempt as i32 <= 0x80_i32 {
        let mut u: C2RustUnnamed_3 = C2RustUnnamed_3 { buffer: [0; 14] };
        u.c2rust_unnamed.lo = 0_i32 as uint64_t;
        u.c2rust_unnamed.hi = 0_i32 as uint64_t;
        u.buffer[7_i32 as usize] = attempt;
        Mix(
            (u.buffer).as_mut_ptr(),
            14_i32 as size_t,
            keybuf.as_mut_ptr(),
            16_i32 as size_t,
        );
        // Interpret the Mix output as field elements x1, x2 and build the
        // u-polynomial of a candidate divisor from them.
        let mut x2: uint64_t = ui128_quotient_mod(u.c2rust_unnamed.lo, u.c2rust_unnamed.hi);
        let mut x1: uint64_t =
            u.c2rust_unnamed.lo.wrapping_sub(x2.wrapping_mul(0x16a6b036d7f2a79_u64));
        x2 = x2.wrapping_add(1);
        d_0.u[0_i32 as usize] = residue_sub(
            residue_mul(x1, x1),
            residue_mul(43_i32 as uint64_t, residue_mul(x2, x2)),
        );
        d_0.u[1_i32 as usize] = residue_add(x1, x1);
        if find_divisor_v(&mut d_0) != 0 {
            break;
        }
        attempt = attempt.wrapping_add(1);
    }
    if attempt as i32 > 0x80_i32 {
        return 6_i32;
    }
    // Multiply the divisor by the fixed 128-bit private scalar; the result
    // encodes the confirmation ID.
    divisor_mul128(
        &d_0,
        0x4e21b9d10f127c1_i64 as uint64_t,
        0x40da7c36d44c_i64 as uint64_t,
        &mut d_0,
    );
    let mut e: C2RustUnnamed_0 = C2RustUnnamed_0 {
        c2rust_unnamed: C2RustUnnamed_1 {
            encoded_lo: 0,
            encoded_hi: 0,
        },
    };
    if d_0.u[0_i32 as usize] == 0xffffffffffffffff_u64 {
        // Zero divisor: encode as (p + 2) * p.
        e.c2rust_unnamed.encoded_lo = __umul128(
            0x16a6b036d7f2a79_u64.wrapping_add(2_i32 as u64),
            0x16a6b036d7f2a79_u64,
            &mut e.c2rust_unnamed.encoded_hi,
        );
    } else if d_0.u[1_i32 as usize] == 0xffffffffffffffff_u64 {
        // Degree-1 divisor: encode as (p + 1) * u0 + p.
        e.c2rust_unnamed.encoded_lo = __umul128(
            0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64),
            d_0.u[0_i32 as usize],
            &mut e.c2rust_unnamed.encoded_hi,
        );
        e.c2rust_unnamed.encoded_lo =
            e.c2rust_unnamed.encoded_lo.wrapping_add(0x16a6b036d7f2a79_u64);
        e.c2rust_unnamed.encoded_hi = e
            .c2rust_unnamed
            .encoded_hi
            .wrapping_add((e.c2rust_unnamed.encoded_lo < 0x16a6b036d7f2a79_u64) as i32 as u64);
    } else {
        // Degree-2 divisor: recover the two x-coordinates from u and encode
        // them as (p + 1) * x1a + x2a.
        let mut x1_0: uint64_t = (if d_0.u[1_i32 as usize] % 2_i32 as u64 != 0 {
            d_0.u[1_i32 as usize].wrapping_add(0x16a6b036d7f2a79_u64)
        } else {
            d_0.u[1_i32 as usize]
        })
        .wrapping_div(2_i32 as u64);
        let mut x2sqr: uint64_t = residue_sub(residue_mul(x1_0, x1_0), d_0.u[0_i32 as usize]);
        let mut x2_0: uint64_t = residue_sqrt(x2sqr);
        if x2_0 == 0xffffffffffffffff_u64 {
            x2_0 = residue_sqrt(residue_mul(x2sqr, residue_inv(43_i32 as uint64_t)));
            e.c2rust_unnamed.encoded_lo = __umul128(
                0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64),
                0x16a6b036d7f2a79_u64.wrapping_add(x2_0),
                &mut e.c2rust_unnamed.encoded_hi,
            );
            e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(x1_0);
            e.c2rust_unnamed.encoded_hi = e
                .c2rust_unnamed
                .encoded_hi
                .wrapping_add((e.c2rust_unnamed.encoded_lo < x1_0) as i32 as u64);
        } else {
            let mut x1a: uint64_t = residue_sub(x1_0, x2_0);
            let mut y1: uint64_t = residue_sub(
                d_0.v[0_i32 as usize],
                residue_mul(d_0.v[1_i32 as usize], x1a),
            );
            let mut x2a: uint64_t = residue_add(x1_0, x2_0);
            let mut y2: uint64_t = residue_sub(
                d_0.v[0_i32 as usize],
                residue_mul(d_0.v[1_i32 as usize], x2a),
            );
            if x1a > x2a {
                std::mem::swap(&mut x1a, &mut x2a);
            }
            if (y1 ^ y2) & 1_i32 as u64 != 0 {
                std::mem::swap(&mut x1a, &mut x2a);
            }
            e.c2rust_unnamed.encoded_lo = __umul128(
                0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64),
                x1a,
                &mut e.c2rust_unnamed.encoded_hi,
            );
            e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(x2a);
            e.c2rust_unnamed.encoded_hi = e
                .c2rust_unnamed
                .encoded_hi
                .wrapping_add((e.c2rust_unnamed.encoded_lo < x2a) as i32 as u64);
        }
    }
    // Convert the 128-bit encoded value to 35 decimal digits by repeated
    // division of the four 32-bit limbs by 10, most significant first.
    let mut decimal: [libc::c_uchar; 35] = [0; 35];
    i = 0_i32 as size_t;
    while i < 35_i32 as u64 {
        let mut c: u32 = (e.c2rust_unnamed_0.encoded[3_i32 as usize]).wrapping_rem(10_i32 as u32);
        e.c2rust_unnamed_0.encoded[3_i32 as usize] =
            (e.c2rust_unnamed_0.encoded[3_i32 as usize]).wrapping_div(10_i32 as u32);
        let mut c2: u32 = ((c as uint64_t) << 32_i32
            | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
        e.c2rust_unnamed_0.encoded[2_i32 as usize] =
            ((c as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
                .wrapping_div(10_i32 as u64) as uint32_t;
        let mut c3: u32 = ((c2 as uint64_t) << 32_i32
            | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
        e.c2rust_unnamed_0.encoded[1_i32 as usize] =
            ((c2 as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
                .wrapping_div(10_i32 as u64) as uint32_t;
        let mut c4: u32 = ((c3 as uint64_t) << 32_i32
            | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
        e.c2rust_unnamed_0.encoded[0_i32 as usize] =
            ((c3 as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
                .wrapping_div(10_i32 as u64) as uint32_t;
        decimal[(34_i32 as u64).wrapping_sub(i) as usize] = c4 as libc::c_uchar;
        i = i.wrapping_add(1);
    }
    // Emit seven dash-separated groups of five digits plus a check digit.
    let mut q: *mut libc::c_char = confirmation_id;
    i = 0_i32 as size_t;
    while i < 7_i32 as u64 {
        if i != 0 {
            let fresh2 = q;
            q = q.offset(1);
            *fresh2 = '-' as i32 as libc::c_char;
        }
        let mut p_0: *mut libc::c_uchar = decimal
            .as_mut_ptr()
            .offset(i.wrapping_mul(5_i32 as u64) as isize);
        *q.offset(0_i32 as isize) =
            (*p_0.offset(0_i32 as isize) as i32 + '0' as i32) as libc::c_char;
        *q.offset(1_i32 as isize) =
            (*p_0.offset(1_i32 as isize) as i32 + '0' as i32) as libc::c_char;
        *q.offset(2_i32 as isize) =
            (*p_0.offset(2_i32 as isize) as i32 + '0' as i32) as libc::c_char;
        *q.offset(3_i32 as isize) =
            (*p_0.offset(3_i32 as isize) as i32 + '0' as i32) as libc::c_char;
        *q.offset(4_i32 as isize) =
            (*p_0.offset(4_i32 as isize) as i32 + '0' as i32) as libc::c_char;
        // Check digit: weighted sum of the group's digits mod 7.
        *q.offset(5_i32 as isize) = ((*p_0.offset(0_i32 as isize) as i32
            + *p_0.offset(1_i32 as isize) as i32 * 2_i32
            + *p_0.offset(2_i32 as isize) as i32
            + *p_0.offset(3_i32 as isize) as i32 * 2_i32
            + *p_0.offset(4_i32 as isize) as i32)
            % 7_i32
            + '0' as i32) as libc::c_char;
        q = q.offset(6_i32 as isize);
        i = i.wrapping_add(1);
    }
    // NUL-terminate.
    let fresh3 = q;
    q = q.offset(1);
    *fresh3 = 0_i32 as libc::c_char;
    0_i32
}

// Error cases surfaced by the safe wrapper, mapped from Generate's integer
// return codes.
#[derive(Error, Debug)]
pub enum ConfirmationIdError {
    #[error("Installation ID is too short.")]
    TooShort,
    #[error("Installation ID is too long.")]
    TooLarge,
    #[error("Invalid character in installation ID.")]
    InvalidCharacter,
    #[error("Installation ID checksum failed. Please check that it is typed correctly.")]
    InvalidCheckDigit,
    #[error("Unknown installation ID version.")]
    UnknownVersion,
    #[error("Unable to generate valid confirmation ID.")]
    Unlucky,
}

// Safe wrapper around Generate: returns the dashed confirmation ID string or
// a typed error.
pub fn generate(installation_id: &str) -> Result<String, ConfirmationIdError> {
    unsafe {
        let inst_id = CString::new(installation_id).unwrap();
        // 7 groups of 6 digits, 6 dashes, and a trailing NUL.
        let mut conf_id = [0u8; 49];
        let result = Generate(inst_id.as_ptr(), conf_id.as_mut_ptr() as *mut libc::c_char);
        match result {
            0 => {}
            1 => return Err(ConfirmationIdError::TooShort),
            2 => return Err(ConfirmationIdError::TooLarge),
            3 => return Err(ConfirmationIdError::InvalidCharacter),
            4 => return Err(ConfirmationIdError::InvalidCheckDigit),
            5 => return Err(ConfirmationIdError::UnknownVersion),
            6 => return Err(ConfirmationIdError::Unlucky),
            _ => panic!("Unknown error code: {}", result),
        }
        Ok(CStr::from_ptr(conf_id.as_ptr() as *const libc::c_char)
            .to_str()
            .unwrap()
            .to_string())
    }
}
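
// Usage sketch (an addition, not part of the original translation): a full
// 41- or 45-digit installation ID is needed for a successful call, so only
// the error paths are exercised here.
#[cfg(test)]
mod generate_tests {
    use super::{generate, ConfirmationIdError};

    #[test]
    fn rejects_malformed_input() {
        assert!(matches!(generate(""), Err(ConfirmationIdError::TooShort)));
        assert!(matches!(
            generate("not-an-installation-id"),
            Err(ConfirmationIdError::InvalidCharacter)
        ));
    }
}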