Remove C-style types
parent ede990ae44
commit ecce5e797f

1 changed file with 239 additions and 257 deletions

src/confid.rs (496 changed lines: +239 −257)
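The whole diff applies one mechanical pattern: the c2rust-emitted aliases int64_t, uint16_t, uint32_t and uint64_t give way to the native i64, u16, u32 and u64, the inline modulus 0x16a6b036d7f2a79 becomes the named constant MOD, and the all-ones "invalid value" marker becomes the named constant BAD. A minimal sketch of the pattern (illustrative; only MOD, BAD and residue_add below come from the diff itself):

    // Before (c2rust output): C-style aliases and a repeated magic modulus.
    //   type uint64_t = u64;
    //   fn residue_add(x: uint64_t, y: uint64_t) -> uint64_t {
    //       ... 0x16a6b036d7f2a79_u64 ...
    //   }

    // After: native types plus named constants, as introduced by this commit.
    const MOD: u64 = 0x16A6B036D7F2A79; // prime modulus of the residue field
    const BAD: u64 = 0xffffffffffffffff; // all-ones "invalid value" sentinel

    fn residue_add(x: u64, y: u64) -> u64 {
        let z = x.wrapping_add(y);
        if z >= MOD { z.wrapping_sub(MOD) } else { z }
    }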
@@ -13,20 +13,16 @@ use std::{
 use thiserror::Error;

 type size_t = u64;
-type int64_t = i64;
-type uint16_t = u16;
-type uint32_t = u32;
-type uint64_t = u64;

 #[derive(Copy, Clone)]
 #[repr(C)]
 struct TDivisor {
-    u: [uint16_t; 2],
-    v: [uint16_t; 2],
+    u: [u16; 2],
+    v: [u16; 2],
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
 struct C2RustUnnamed {
-    encoded: [uint32_t; 4],
+    encoded: [u32; 4],
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
@@ -37,14 +33,14 @@ union C2RustUnnamed_0 {
 #[derive(Copy, Clone)]
 #[repr(C)]
 struct C2RustUnnamed_1 {
-    encoded_lo: uint64_t,
-    encoded_hi: uint64_t,
+    encoded_lo: u64,
+    encoded_hi: u64,
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
 struct C2RustUnnamed_2 {
-    lo: uint64_t,
-    hi: uint64_t,
+    lo: u64,
+    hi: u64,
 }
 #[derive(Copy, Clone)]
 #[repr(C)]
@@ -55,34 +51,37 @@ union C2RustUnnamed_3 {
 #[derive(Copy, Clone)]
 #[repr(C, packed)]
 struct C2RustUnnamed_4 {
-    HardwareID: uint64_t,
-    ProductIDLow: uint64_t,
+    HardwareID: u64,
+    ProductIDLow: u64,
     ProductIDHigh: u8,
     KeySHA1: u16,
 }
-static mut f: [uint64_t; 6] = [
-    0_i32 as uint64_t,
-    0x21840136c85381_u64 as uint64_t,
-    0x44197b83892ad0_u64 as uint64_t,
-    0x1400606322b3b04_u64 as uint64_t,
-    0x1400606322b3b04_u64 as uint64_t,
-    1_i32 as uint64_t,
+static mut f: [u64; 6] = [
+    0,
+    0x21840136c85381,
+    0x44197b83892ad0,
+    0x1400606322b3b04,
+    0x1400606322b3b04,
+    1,
 ];

-fn residue_add(x: uint64_t, y: uint64_t) -> uint64_t {
-    let mut z: uint64_t = x.wrapping_add(y);
+const MOD: u64 = 0x16A6B036D7F2A79;
+const BAD: u64 = 0xffffffffffffffff;
+
+fn residue_add(x: u64, y: u64) -> u64 {
+    let mut z: u64 = x.wrapping_add(y);
     //z = z - (z >= MOD ? MOD : 0);
-    if z >= 0x16a6b036d7f2a79_u64 {
-        z = z.wrapping_sub(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
+    if z >= MOD {
+        z = z.wrapping_sub(MOD);
     }
     z
 }

-fn residue_sub(x: uint64_t, y: uint64_t) -> uint64_t {
-    let mut z: uint64_t = x.wrapping_sub(y);
+fn residue_sub(x: u64, y: u64) -> u64 {
+    let mut z: u64 = x.wrapping_sub(y);
     //z += (x < y ? MOD : 0);
     if x < y {
-        z = z.wrapping_add(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
+        z = z.wrapping_add(MOD);
     }
     z
 }
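A side note on the wrapping arithmetic the rewrite keeps: MOD is below 2^57, so the sum of two reduced residues can never wrap a u64, and the wrapping_* calls are a faithful carry-over from the C original rather than a necessity. A small self-contained check (my own sketch, not part of the commit):

    const MOD: u64 = 0x16A6B036D7F2A79;

    fn residue_add(x: u64, y: u64) -> u64 {
        let z = x.wrapping_add(y); // x + y < 2^58 for reduced inputs: no real wrap
        if z >= MOD { z - MOD } else { z }
    }

    fn residue_sub(x: u64, y: u64) -> u64 {
        // When x < y the subtraction borrows through zero; adding MOD back
        // lands on the correct residue, exactly as in the hunk above.
        let z = x.wrapping_sub(y);
        if x < y { z.wrapping_add(MOD) } else { z }
    }

    fn main() {
        assert_eq!(residue_add(MOD - 1, 1), 0);
        assert_eq!(residue_sub(0, 1), MOD - 1);
    }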
@@ -116,24 +115,24 @@ fn ui128_quotient_mod(lo: u64, hi: u64) -> u64 {
     prod3lo >> 42_i32 | prod3hi << 22_i32
 }

-fn residue_mul(x: uint64_t, y: uint64_t) -> uint64_t {
+fn residue_mul(x: u64, y: u64) -> u64 {
     // * ceil(2**170/MOD) = 0x2d351 c6d04f8b|604fa6a1 c6346a87 for (p-1)*(p-1) max
-    let mut hi: uint64_t = 0;
-    let lo: uint64_t = umul128(x, y, &mut hi);
-    let quotient: uint64_t = ui128_quotient_mod(lo, hi);
-    lo.wrapping_sub(quotient.wrapping_mul(0x16a6b036d7f2a79_u64)) as uint64_t
+    let mut hi: u64 = 0;
+    let lo: u64 = umul128(x, y, &mut hi);
+    let quotient: u64 = ui128_quotient_mod(lo, hi);
+    lo.wrapping_sub(quotient.wrapping_mul(MOD))
 }

-fn residue_pow(x: uint64_t, mut y: uint64_t) -> uint64_t {
+fn residue_pow(x: u64, mut y: u64) -> u64 {
     if y == 0_i32 as u64 {
-        return 1_i32 as uint64_t;
+        return 1_i32 as u64;
     }
-    let mut cur: uint64_t = x;
+    let mut cur: u64 = x;
     while y & 1_i32 as u64 == 0 {
         cur = residue_mul(cur, cur);
         y >>= 1_i32;
     }
-    let mut res: uint64_t = cur;
+    let mut res: u64 = cur;
     loop {
         y >>= 1_i32;
         if y == 0_i32 as u64 {
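residue_mul reduces the 128-bit product with the precomputed ceil(2**170/MOD) reciprocal noted in its comment, a Barrett-style reduction. With native u128 the same result can be computed directly, which makes a handy cross-check for the fast path (a reference sketch, not the committed code):

    const MOD: u64 = 0x16A6B036D7F2A79;

    // Reference multiply-reduce via u128; the diff's residue_mul must agree
    // with this for all reduced inputs.
    fn residue_mul_reference(x: u64, y: u64) -> u64 {
        ((x as u128 * y as u128) % MOD as u128) as u64
    }

    fn main() {
        // (-1) * (-1) == 1 in the field, i.e. (MOD - 1)^2 mod MOD == 1.
        assert_eq!(residue_mul_reference(MOD - 1, MOD - 1), 1);
    }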
@@ -147,37 +146,36 @@ fn residue_pow(x: uint64_t, mut y: uint64_t) -> uint64_t {
     res
 }

-fn inverse(mut u: uint64_t, mut v: uint64_t) -> uint64_t {
+fn inverse(mut u: u64, mut v: u64) -> u64 {
     let mut tmp;
-    let mut xu: int64_t = 1_i32 as int64_t;
-    let mut xv: int64_t = 0_i32 as int64_t;
-    let v0: uint64_t = v;
+    let mut xu: i64 = 1_i32 as i64;
+    let mut xv: i64 = 0_i32 as i64;
+    let v0: u64 = v;
     while u > 1_i32 as u64 {
-        let d: uint64_t = v.wrapping_div(u);
-        let remainder: uint64_t = v.wrapping_rem(u);
-        tmp = u as int64_t;
+        let d: u64 = v.wrapping_div(u);
+        let remainder: u64 = v.wrapping_rem(u);
+        tmp = u as i64;
         u = remainder;
-        v = tmp as uint64_t;
+        v = tmp as u64;
         tmp = xu;
-        xu = (xv as u64).wrapping_sub(d.wrapping_mul(xu as u64)) as int64_t;
+        xu = (xv as u64).wrapping_sub(d.wrapping_mul(xu as u64)) as i64;
         xv = tmp;
     }
-    xu = (xu as u64).wrapping_add(if xu < 0_i32 as i64 { v0 } else { 0_i32 as u64 }) as int64_t
-        as int64_t;
-    xu as uint64_t
+    xu = (xu as u64).wrapping_add(if xu < 0_i32 as i64 { v0 } else { 0_i32 as u64 }) as i64;
+    xu as u64
 }

-fn residue_inv(x: uint64_t) -> uint64_t {
-    inverse(x, 0x16a6b036d7f2a79_u64 as uint64_t)
+fn residue_inv(x: u64) -> u64 {
+    inverse(x, MOD)
 }

-fn residue_sqrt(what: uint64_t) -> uint64_t {
+fn residue_sqrt(what: u64) -> u64 {
     if what == 0 {
-        return 0_i32 as uint64_t;
+        return 0_i32 as u64;
     }
-    let g: uint64_t = 43_i32 as uint64_t;
-    let mut e: uint64_t = 0_i32 as uint64_t;
-    let mut q: uint64_t = 0x16a6b036d7f2a79_u64.wrapping_sub(1_i32 as u64) as uint64_t;
+    let g: u64 = 43_i32 as u64;
+    let mut e: u64 = 0_i32 as u64;
+    let mut q: u64 = MOD.wrapping_sub(1_i32 as u64);
     while q & 1_i32 as u64 == 0 {
         e = e.wrapping_add(1);
         q >>= 1_i32;
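inverse is the extended Euclidean algorithm specialized to a modular inverse, with the Bezout coefficients tracked in xu and xv. The same computation in checked signed arithmetic, assuming gcd(u, v) == 1 (a sketch for orientation, not the committed code):

    fn mod_inverse(u: u64, v: u64) -> u64 {
        let (mut a, mut b) = (u as i128, v as i128);
        let (mut x0, mut x1) = (1_i128, 0_i128); // invariant: a = x0*u, b = x1*u (mod v)
        while a > 1 {
            let q = b / a;
            let r = b - q * a; // classic Euclid step: (a, b) <- (b mod a, a)
            b = a;
            a = r;
            let t = x1 - q * x0; // same update the diff applies to (xu, xv)
            x1 = x0;
            x0 = t;
        }
        x0.rem_euclid(v as i128) as u64 // normalize a possibly negative coefficient
    }

    fn main() {
        assert_eq!(mod_inverse(3, 7), 5); // 3 * 5 == 15 == 1 (mod 7)
    }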
@@ -192,8 +190,8 @@ fn residue_sqrt(what: uint64_t) -> uint64_t {
     let mut b = residue_mul(residue_mul(what, x), x);
     x = residue_mul(what, x);
     while b != 1_i32 as u64 {
-        let mut m: uint64_t = 0_i32 as uint64_t;
-        let mut b2: uint64_t = b;
+        let mut m: u64 = 0_i32 as u64;
+        let mut b2: u64 = b;
         loop {
             m = m.wrapping_add(1);
             b2 = residue_mul(b2, b2);
@@ -202,11 +200,11 @@ fn residue_sqrt(what: uint64_t) -> uint64_t {
             }
         }
         if m == r {
-            return 0xffffffffffffffff_u64 as uint64_t;
+            return BAD;
         }
         let t = residue_pow(
             y,
-            (1_i32 << r.wrapping_sub(m).wrapping_sub(1_i32 as u64)) as uint64_t,
+            (1_i32 << r.wrapping_sub(m).wrapping_sub(1_i32 as u64)) as u64,
         );
         y = residue_mul(t, t);
         r = m;
@@ -214,7 +212,7 @@ fn residue_sqrt(what: uint64_t) -> uint64_t {
         b = residue_mul(b, y);
     }
     if residue_mul(x, x) != what {
-        return 0xffffffffffffffff_u64 as uint64_t;
+        return BAD;
     }
     x
 }
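residue_sqrt is a Tonelli-Shanks square root that reports failure through the new BAD sentinel. Whether a root exists at all is Euler's criterion; a self-contained restatement with its own modular power (a sketch under the assumption that MOD is prime, not the diff's residue_pow):

    const MOD: u64 = 0x16A6B036D7F2A79;

    // Plain square-and-multiply over u128 intermediates.
    fn modpow(mut base: u64, mut exp: u64) -> u64 {
        let mut acc: u64 = 1;
        while exp > 0 {
            if exp & 1 == 1 {
                acc = ((acc as u128 * base as u128) % MOD as u128) as u64;
            }
            base = ((base as u128 * base as u128) % MOD as u128) as u64;
            exp >>= 1;
        }
        acc
    }

    // Euler's criterion: nonzero `what` has a square root mod the prime MOD
    // exactly when what^((MOD-1)/2) == 1 -- the cases where residue_sqrt
    // would otherwise return BAD.
    fn has_sqrt(what: u64) -> bool {
        what == 0 || modpow(what, (MOD - 1) / 2) == 1
    }

    fn main() {
        assert!(has_sqrt(4)); // 4 == 2 * 2
    }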
@@ -224,14 +222,14 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
     // u = u0 + u1*x + x^2
     // f%u = f0 + f1*x
     let mut v1;
-    let mut f2: [uint64_t; 6] = [0; 6];
+    let mut f2: [u64; 6] = [0; 6];
     let mut i: i32 = 0_i32;
     while i < 6_i32 {
         f2[i as usize] = f[i as usize];
         i += 1;
     }
-    let u0: uint64_t = (*d).u[0_i32 as usize] as uint64_t;
-    let u1: uint64_t = (*d).u[1_i32 as usize] as uint64_t;
+    let u0: u64 = (*d).u[0_i32 as usize] as u64;
+    let u1: u64 = (*d).u[1_i32 as usize] as u64;
     let mut j: i32 = 4_i32;
     loop {
         let fresh0 = j;
@@ -244,7 +242,7 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
             f2[(j + 1_i32) as usize],
             residue_mul(u1, f2[(j + 2_i32) as usize]),
         );
-        f2[(j + 2_i32) as usize] = 0_i32 as uint64_t;
+        f2[(j + 2_i32) as usize] = 0_i32 as u64;
     }
     // v = v0 + v1*x
     // u | (v0^2 - f0) + (2*v0*v1 - f1)*x + v1^2*x^2 = u0*v1^2 + u1*v1^2*x + v1^2*x^2
@@ -253,11 +251,11 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
     // v0^2 = f0 + u0*v1^2 = (f1 + u1*v1^2)^2 / (2*v1)^2
     // (f1^2) + 2*(f1*u1-2*f0) * v1^2 + (u1^2-4*u0) * v1^4 = 0
     // v1^2 = ((2*f0-f1*u1) +- 2*sqrt(-f0*f1*u1 + f0^2 + f1^2*u0))) / (u1^2-4*u0)
-    let f0: uint64_t = f2[0_i32 as usize];
-    let f1: uint64_t = f2[1_i32 as usize];
-    let u0double: uint64_t = residue_add(u0, u0);
-    let coeff2: uint64_t = residue_sub(residue_mul(u1, u1), residue_add(u0double, u0double));
-    let coeff1: uint64_t = residue_sub(residue_add(f0, f0), residue_mul(f1, u1));
+    let f0: u64 = f2[0_i32 as usize];
+    let f1: u64 = f2[1_i32 as usize];
+    let u0double: u64 = residue_add(u0, u0);
+    let coeff2: u64 = residue_sub(residue_mul(u1, u1), residue_add(u0double, u0double));
+    let coeff1: u64 = residue_sub(residue_add(f0, f0), residue_mul(f1, u1));
     if coeff2 == 0_i32 as u64 {
         if coeff1 == 0_i32 as u64 {
             if f1 == 0_i32 as u64 {
@@ -266,59 +264,59 @@ unsafe fn find_divisor_v(mut d: *mut TDivisor) -> i32 {
             }
             return 0_i32;
         }
-        let sqr: uint64_t = residue_mul(
+        let sqr: u64 = residue_mul(
             residue_mul(f1, f1),
             residue_inv(residue_add(coeff1, coeff1)),
         );
         v1 = residue_sqrt(sqr);
-        if v1 == 0xffffffffffffffff_u64 {
+        if v1 == BAD {
             return 0_i32;
         }
     } else {
-        let mut d_0: uint64_t = residue_add(
+        let mut d_0: u64 = residue_add(
             residue_mul(f0, f0),
             residue_mul(f1, residue_sub(residue_mul(f1, u0), residue_mul(f0, u1))),
         );
         d_0 = residue_sqrt(d_0);
-        if d_0 == 0xffffffffffffffff_u64 {
+        if d_0 == BAD {
             return 0_i32;
         }
         d_0 = residue_add(d_0, d_0);
-        let inv: uint64_t = residue_inv(coeff2);
-        let mut root: uint64_t = residue_mul(residue_add(coeff1, d_0), inv);
+        let inv: u64 = residue_inv(coeff2);
+        let mut root: u64 = residue_mul(residue_add(coeff1, d_0), inv);
         v1 = residue_sqrt(root);
-        if v1 == 0xffffffffffffffff_u64 {
+        if v1 == BAD {
             root = residue_mul(residue_sub(coeff1, d_0), inv);
             v1 = residue_sqrt(root);
-            if v1 == 0xffffffffffffffff_u64 {
+            if v1 == BAD {
                 return 0_i32;
             }
         }
     }
-    let v0: uint64_t = residue_mul(
+    let v0: u64 = residue_mul(
         residue_add(f1, residue_mul(u1, residue_mul(v1, v1))),
         residue_inv(residue_add(v1, v1)),
     );
-    (*d).v[0_i32 as usize] = v0 as uint16_t;
-    (*d).v[1_i32 as usize] = v1 as uint16_t;
+    (*d).v[0_i32 as usize] = v0 as u16;
+    (*d).v[1_i32 as usize] = v1 as u16;
     1_i32
 }

 /// generic short slow code
 unsafe fn polynomial_mul(
     adeg: i32,
-    a: *const uint64_t,
+    a: *const u64,
     bdeg: i32,
-    b: *const uint64_t,
+    b: *const u64,
     mut resultprevdeg: i32,
-    result: *mut uint64_t,
+    result: *mut u64,
 ) -> i32 {
     if adeg < 0_i32 || bdeg < 0_i32 {
         return resultprevdeg;
     }
     let mut i = resultprevdeg + 1_i32;
     while i <= adeg + bdeg {
-        *result.offset(i as isize) = 0_i32 as uint64_t;
+        *result.offset(i as isize) = 0_i32 as u64;
         i += 1;
     }
     resultprevdeg = i - 1_i32;
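polynomial_mul, whose signature the hunk retypes, is a schoolbook convolution with coefficients in the residue field; the pointer-and-degree interface is a direct carry-over from C. The same loop over safe slices, multiplying through u128 (an illustrative sketch, not the committed code):

    const MOD: u64 = 0x16A6B036D7F2A79;

    fn poly_mul(a: &[u64], b: &[u64]) -> Vec<u64> {
        let mut out = vec![0_u64; a.len() + b.len() - 1];
        for (i, &ai) in a.iter().enumerate() {
            for (j, &bj) in b.iter().enumerate() {
                // out[i + j] += a[i] * b[j], reduced mod MOD
                let prod = (ai as u128 * bj as u128) % MOD as u128;
                out[i + j] = ((out[i + j] as u128 + prod) % MOD as u128) as u64;
            }
        }
        out
    }

    fn main() {
        assert_eq!(poly_mul(&[1, 1], &[1, 1]), vec![1, 2, 1]); // (1 + x)^2
    }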
@@ -342,14 +340,14 @@ unsafe fn polynomial_mul(

 unsafe fn polynomial_div_monic(
     adeg: i32,
-    a: *mut uint64_t,
+    a: *mut u64,
     bdeg: i32,
-    b: *const uint64_t,
-    quotient: *mut uint64_t,
+    b: *const u64,
+    quotient: *mut u64,
 ) -> i32 {
     let mut i = adeg - bdeg;
     while i >= 0_i32 {
-        let q: uint64_t = *a.offset((i + bdeg) as isize);
+        let q: u64 = *a.offset((i + bdeg) as isize);
         if !quotient.is_null() {
             *quotient.offset(i as isize) = q;
         }
@@ -361,7 +359,7 @@ unsafe fn polynomial_div_monic(
             );
             j += 1;
         }
-        *a.offset((i + j) as isize) = 0_i32 as uint64_t;
+        *a.offset((i + j) as isize) = 0_i32 as u64;
         i -= 1;
     }
     i += bdeg;
@@ -373,30 +371,30 @@ unsafe fn polynomial_div_monic(

 unsafe fn polynomial_xgcd(
     adeg: i32,
-    a: *const uint64_t,
+    a: *const u64,
     bdeg: i32,
-    b: *const uint64_t,
+    b: *const u64,
     pgcddeg: *mut i32,
-    gcd: *mut uint64_t,
+    gcd: *mut u64,
     pmult1deg: *mut i32,
-    mult1: *mut uint64_t,
+    mult1: *mut u64,
     pmult2deg: *mut i32,
-    mult2: *mut uint64_t,
+    mult2: *mut u64,
 ) {
     let mut sdeg: i32 = -1_i32;
-    let mut s: [uint64_t; 3] = [0_i32 as uint64_t, 0_i32 as uint64_t, 0_i32 as uint64_t];
+    let mut s: [u64; 3] = [0_i32 as u64, 0_i32 as u64, 0_i32 as u64];
     let mut mult1deg: i32 = 0_i32;
-    *mult1.offset(0_i32 as isize) = 1_i32 as uint64_t;
-    *mult1.offset(1_i32 as isize) = 0_i32 as uint64_t;
-    *mult1.offset(2_i32 as isize) = 0_i32 as uint64_t;
+    *mult1.offset(0_i32 as isize) = 1_i32 as u64;
+    *mult1.offset(1_i32 as isize) = 0_i32 as u64;
+    *mult1.offset(2_i32 as isize) = 0_i32 as u64;
     let mut tdeg: i32 = 0_i32;
-    let mut t: [uint64_t; 3] = [1_i32 as uint64_t, 0_i32 as uint64_t, 0_i32 as uint64_t];
+    let mut t: [u64; 3] = [1_i32 as u64, 0_i32 as u64, 0_i32 as u64];
     let mut mult2deg: i32 = -1_i32;
-    *mult2.offset(0_i32 as isize) = 0_i32 as uint64_t;
-    *mult2.offset(1_i32 as isize) = 0_i32 as uint64_t;
-    *mult2.offset(2_i32 as isize) = 0_i32 as uint64_t;
+    *mult2.offset(0_i32 as isize) = 0_i32 as u64;
+    *mult2.offset(1_i32 as isize) = 0_i32 as u64;
+    *mult2.offset(2_i32 as isize) = 0_i32 as u64;
     let mut rdeg: i32 = bdeg;
-    let mut r: [uint64_t; 3] = [
+    let mut r: [u64; 3] = [
         *b.offset(0_i32 as isize),
         *b.offset(1_i32 as isize),
         *b.offset(2_i32 as isize),
@@ -443,7 +441,7 @@ unsafe fn polynomial_xgcd(
             *mult2.offset(2_i32 as isize) = tmp2;
         } else {
             let delta: i32 = gcddeg - rdeg;
-            let mult: uint64_t =
+            let mult: u64 =
                 residue_mul(*gcd.offset(gcddeg as isize), residue_inv(r[rdeg as usize]));
             // quotient = mult * x**delta
             let mut i: i32 = 0_i32;
@@ -493,47 +491,47 @@ unsafe fn polynomial_xgcd(
     *pmult2deg = mult2deg;
 }

-unsafe fn u2poly(src: *const TDivisor, polyu: *mut uint64_t, polyv: *mut uint64_t) -> i32 {
-    if (*src).u[1_i32 as usize] as u64 != 0xffffffffffffffff_u64 {
-        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as uint64_t;
-        *polyu.offset(1_i32 as isize) = (*src).u[1_i32 as usize] as uint64_t;
-        *polyu.offset(2_i32 as isize) = 1_i32 as uint64_t;
-        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as uint64_t;
-        *polyv.offset(1_i32 as isize) = (*src).v[1_i32 as usize] as uint64_t;
+unsafe fn u2poly(src: *const TDivisor, polyu: *mut u64, polyv: *mut u64) -> i32 {
+    if (*src).u[1_i32 as usize] as u64 != BAD {
+        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as u64;
+        *polyu.offset(1_i32 as isize) = (*src).u[1_i32 as usize] as u64;
+        *polyu.offset(2_i32 as isize) = 1_i32 as u64;
+        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as u64;
+        *polyv.offset(1_i32 as isize) = (*src).v[1_i32 as usize] as u64;
         return 2_i32;
     }
-    if (*src).u[0_i32 as usize] as u64 != 0xffffffffffffffff_u64 {
-        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as uint64_t;
-        *polyu.offset(1_i32 as isize) = 1_i32 as uint64_t;
-        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as uint64_t;
-        *polyv.offset(1_i32 as isize) = 0_i32 as uint64_t;
+    if (*src).u[0_i32 as usize] as u64 != BAD {
+        *polyu.offset(0_i32 as isize) = (*src).u[0_i32 as usize] as u64;
+        *polyu.offset(1_i32 as isize) = 1_i32 as u64;
+        *polyv.offset(0_i32 as isize) = (*src).v[0_i32 as usize] as u64;
+        *polyv.offset(1_i32 as isize) = 0_i32 as u64;
         return 1_i32;
     }
-    *polyu.offset(0_i32 as isize) = 1_i32 as uint64_t;
-    *polyv.offset(0_i32 as isize) = 0_i32 as uint64_t;
-    *polyv.offset(1_i32 as isize) = 0_i32 as uint64_t;
+    *polyu.offset(0_i32 as isize) = 1_i32 as u64;
+    *polyv.offset(0_i32 as isize) = 0_i32 as u64;
+    *polyv.offset(1_i32 as isize) = 0_i32 as u64;
     0_i32
 }

 unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mut TDivisor) {
-    let mut u1: [uint64_t; 3] = [0; 3];
-    let mut u2: [uint64_t; 3] = [0; 3];
-    let mut v1: [uint64_t; 2] = [0; 2];
-    let mut v2: [uint64_t; 2] = [0; 2];
+    let mut u1: [u64; 3] = [0; 3];
+    let mut u2: [u64; 3] = [0; 3];
+    let mut v1: [u64; 2] = [0; 2];
+    let mut v2: [u64; 2] = [0; 2];
     let u1deg: i32 = u2poly(src1, u1.as_mut_ptr(), v1.as_mut_ptr());
     let u2deg: i32 = u2poly(src2, u2.as_mut_ptr(), v2.as_mut_ptr());
     // extended gcd: d1 = gcd(u1, u2) = e1*u1 + e2*u2
     let mut d1deg: i32 = 0;
     let mut e1deg: i32 = 0;
     let mut e2deg: i32 = 0;
-    let mut d1: [uint64_t; 3] = [0; 3];
-    let mut e1: [uint64_t; 3] = [0; 3];
-    let mut e2: [uint64_t; 3] = [0; 3];
+    let mut d1: [u64; 3] = [0; 3];
+    let mut e1: [u64; 3] = [0; 3];
+    let mut e2: [u64; 3] = [0; 3];
     polynomial_xgcd(
         u1deg,
-        u1.as_mut_ptr() as *const uint64_t,
+        u1.as_mut_ptr() as *const u64,
         u2deg,
-        u2.as_mut_ptr() as *const uint64_t,
+        u2.as_mut_ptr() as *const u64,
         &mut d1deg,
         d1.as_mut_ptr(),
         &mut e1deg,
@@ -542,10 +540,10 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         e2.as_mut_ptr(),
     );
     // extended gcd again: d = gcd(d1, v1+v2) = c1*d1 + c2*(v1+v2)
-    let mut b: [uint64_t; 3] = [
+    let mut b: [u64; 3] = [
         residue_add(v1[0_i32 as usize], v2[0_i32 as usize]),
         residue_add(v1[1_i32 as usize], v2[1_i32 as usize]),
-        0_i32 as uint64_t,
+        0_i32 as u64,
     ];
     let bdeg: i32 = if b[1_i32 as usize] == 0_i32 as u64 {
         if b[0_i32 as usize] == 0_i32 as u64 {
@@ -559,14 +557,14 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
     let mut ddeg: i32 = 0;
     let mut c1deg: i32 = 0;
     let mut c2deg: i32 = 0;
-    let mut d: [uint64_t; 3] = [0; 3];
-    let mut c1: [uint64_t; 3] = [0; 3];
-    let mut c2: [uint64_t; 3] = [0; 3];
+    let mut d: [u64; 3] = [0; 3];
+    let mut c1: [u64; 3] = [0; 3];
+    let mut c2: [u64; 3] = [0; 3];
     polynomial_xgcd(
         d1deg,
-        d1.as_mut_ptr() as *const uint64_t,
+        d1.as_mut_ptr() as *const u64,
         bdeg,
-        b.as_mut_ptr() as *const uint64_t,
+        b.as_mut_ptr() as *const u64,
         &mut ddeg,
         d.as_mut_ptr(),
         &mut c1deg,
@@ -574,13 +572,13 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         &mut c2deg,
         c2.as_mut_ptr(),
     );
-    let dmult: uint64_t = residue_inv(d[ddeg as usize]);
+    let dmult: u64 = residue_inv(d[ddeg as usize]);
     let mut i = 0_i32;
     while i < ddeg {
         d[i as usize] = residue_mul(d[i as usize], dmult);
         i += 1;
     }
-    d[i as usize] = 1_i32 as uint64_t;
+    d[i as usize] = 1_i32 as u64;
     i = 0_i32;
     while i <= c1deg {
         c1[i as usize] = residue_mul(c1[i as usize], dmult);
@@ -591,43 +589,43 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         c2[i as usize] = residue_mul(c2[i as usize], dmult);
         i += 1;
     }
-    let mut u: [uint64_t; 5] = [0; 5];
+    let mut u: [u64; 5] = [0; 5];
     let mut udeg: i32 = polynomial_mul(
         u1deg,
-        u1.as_mut_ptr() as *const uint64_t,
+        u1.as_mut_ptr() as *const u64,
         u2deg,
-        u2.as_mut_ptr() as *const uint64_t,
+        u2.as_mut_ptr() as *const u64,
         -1_i32,
         u.as_mut_ptr(),
     );
     // u is monic
-    let mut v: [uint64_t; 7] = [0; 7];
-    let mut tmp: [uint64_t; 7] = [0; 7];
+    let mut v: [u64; 7] = [0; 7];
+    let mut tmp: [u64; 7] = [0; 7];
     // c1*(e1*u1*v2 + e2*u2*v1) + c2*(v1*v2 + f)
     // c1*(e1*u1*(v2-v1) + d1*v1) + c2*(v1*v2 + f)
     v[0_i32 as usize] = residue_sub(v2[0_i32 as usize], v1[0_i32 as usize]);
     v[1_i32 as usize] = residue_sub(v2[1_i32 as usize], v1[1_i32 as usize]);
     let mut tmpdeg = polynomial_mul(
         e1deg,
-        e1.as_mut_ptr() as *const uint64_t,
+        e1.as_mut_ptr() as *const u64,
         1_i32,
-        v.as_mut_ptr() as *const uint64_t,
+        v.as_mut_ptr() as *const u64,
         -1_i32,
         tmp.as_mut_ptr(),
     );
     let mut vdeg = polynomial_mul(
         u1deg,
-        u1.as_mut_ptr() as *const uint64_t,
+        u1.as_mut_ptr() as *const u64,
         tmpdeg,
-        tmp.as_mut_ptr() as *const uint64_t,
+        tmp.as_mut_ptr() as *const u64,
         -1_i32,
         v.as_mut_ptr(),
     );
     vdeg = polynomial_mul(
         d1deg,
-        d1.as_mut_ptr() as *const uint64_t,
+        d1.as_mut_ptr() as *const u64,
         1_i32,
-        v1.as_mut_ptr() as *const uint64_t,
+        v1.as_mut_ptr() as *const u64,
         vdeg,
         v.as_mut_ptr(),
     );
@@ -645,27 +643,27 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
     tmpdeg = 5_i32;
     tmpdeg = polynomial_mul(
         1_i32,
-        v1.as_mut_ptr() as *const uint64_t,
+        v1.as_mut_ptr() as *const u64,
         1_i32,
-        v2.as_mut_ptr() as *const uint64_t,
+        v2.as_mut_ptr() as *const u64,
         tmpdeg,
         tmp.as_mut_ptr(),
     );
     vdeg = polynomial_mul(
         c2deg,
-        c2.as_mut_ptr() as *const uint64_t,
+        c2.as_mut_ptr() as *const u64,
         tmpdeg,
-        tmp.as_mut_ptr() as *const uint64_t,
+        tmp.as_mut_ptr() as *const u64,
         vdeg,
         v.as_mut_ptr(),
     );
     if ddeg > 0_i32 {
-        let mut udiv: [uint64_t; 5] = [0; 5];
+        let mut udiv: [u64; 5] = [0; 5];
         polynomial_div_monic(
             udeg,
             u.as_mut_ptr(),
             ddeg,
-            d.as_mut_ptr() as *const uint64_t,
+            d.as_mut_ptr() as *const u64,
             udiv.as_mut_ptr(),
         );
         udeg -= ddeg;
@@ -673,7 +671,7 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             udeg,
             udiv.as_mut_ptr(),
             ddeg,
-            d.as_mut_ptr() as *const uint64_t,
+            d.as_mut_ptr() as *const u64,
             u.as_mut_ptr(),
         );
         udeg -= ddeg;
@@ -682,7 +680,7 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             vdeg,
             v.as_mut_ptr(),
             ddeg,
-            d.as_mut_ptr() as *const uint64_t,
+            d.as_mut_ptr() as *const u64,
             udiv.as_mut_ptr(),
         );
         vdeg -= ddeg;
@@ -695,16 +693,16 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
         vdeg,
         v.as_mut_ptr(),
         udeg,
-        u.as_mut_ptr() as *const uint64_t,
-        std::ptr::null_mut::<uint64_t>(),
+        u.as_mut_ptr() as *const u64,
+        std::ptr::null_mut::<u64>(),
     );
     while udeg > 2_i32 {
         // u' = monic((f-v^2)/u), v'=-v mod u'
         tmpdeg = polynomial_mul(
             vdeg,
-            v.as_mut_ptr() as *const uint64_t,
+            v.as_mut_ptr() as *const u64,
             vdeg,
-            v.as_mut_ptr() as *const uint64_t,
+            v.as_mut_ptr() as *const u64,
             -1_i32,
             tmp.as_mut_ptr(),
         );
@@ -714,7 +712,7 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             i += 1;
         }
         while i <= tmpdeg {
-            tmp[i as usize] = residue_sub(0_i32 as uint64_t, tmp[i as usize]);
+            tmp[i as usize] = residue_sub(0_i32 as u64, tmp[i as usize]);
             i += 1;
         }
         while i <= 5_i32 {
@@ -722,76 +720,76 @@ unsafe fn divisor_add(src1: *const TDivisor, src2: *const TDivisor, mut dst: *mu
             i += 1;
         }
         tmpdeg = i - 1_i32;
-        let mut udiv_0: [uint64_t; 5] = [0; 5];
+        let mut udiv_0: [u64; 5] = [0; 5];
         polynomial_div_monic(
             tmpdeg,
             tmp.as_mut_ptr(),
             udeg,
-            u.as_mut_ptr() as *const uint64_t,
+            u.as_mut_ptr() as *const u64,
             udiv_0.as_mut_ptr(),
         );
         udeg = tmpdeg - udeg;
-        let mult: uint64_t = residue_inv(udiv_0[udeg as usize]);
+        let mult: u64 = residue_inv(udiv_0[udeg as usize]);
         i = 0_i32;
         while i < udeg {
             u[i as usize] = residue_mul(udiv_0[i as usize], mult);
             i += 1;
         }
-        u[i as usize] = 1_i32 as uint64_t;
+        u[i as usize] = 1_i32 as u64;
         i = 0_i32;
         while i <= vdeg {
-            v[i as usize] = residue_sub(0_i32 as uint64_t, v[i as usize]);
+            v[i as usize] = residue_sub(0_i32 as u64, v[i as usize]);
            i += 1;
         }
         vdeg = polynomial_div_monic(
             vdeg,
             v.as_mut_ptr(),
             udeg,
-            u.as_mut_ptr() as *const uint64_t,
-            std::ptr::null_mut::<uint64_t>(),
+            u.as_mut_ptr() as *const u64,
+            std::ptr::null_mut::<u64>(),
         );
     }
     if udeg == 2_i32 {
-        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as uint16_t;
-        (*dst).u[1_i32 as usize] = u[1_i32 as usize] as uint16_t;
+        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as u16;
+        (*dst).u[1_i32 as usize] = u[1_i32 as usize] as u16;
         (*dst).v[0_i32 as usize] = (if vdeg >= 0_i32 {
             v[0_i32 as usize]
         } else {
             0_i32 as u64
-        }) as uint16_t;
+        }) as u16;
         (*dst).v[1_i32 as usize] = (if vdeg >= 1_i32 {
             v[1_i32 as usize]
         } else {
             0_i32 as u64
-        }) as uint16_t;
+        }) as u16;
     } else if udeg == 1_i32 {
-        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as uint16_t;
-        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        (*dst).u[0_i32 as usize] = u[0_i32 as usize] as u16;
+        (*dst).u[1_i32 as usize] = BAD as u16;
         (*dst).v[0_i32 as usize] = (if vdeg >= 0_i32 {
             v[0_i32 as usize]
         } else {
             0_i32 as u64
-        }) as uint16_t;
-        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        }) as u16;
+        (*dst).v[1_i32 as usize] = BAD as u16;
     } else {
-        (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        (*dst).u[0_i32 as usize] = BAD as u16;
+        (*dst).u[1_i32 as usize] = BAD as u16;
+        (*dst).v[0_i32 as usize] = BAD as u16;
+        (*dst).v[1_i32 as usize] = BAD as u16;
     };
 }

 unsafe fn divisor_mul128(
     src: *const TDivisor,
-    mut mult_lo: uint64_t,
-    mut mult_hi: uint64_t,
+    mut mult_lo: u64,
+    mut mult_hi: u64,
     mut dst: *mut TDivisor,
 ) {
     if mult_lo == 0_i32 as u64 && mult_hi == 0_i32 as u64 {
-        (*dst).u[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).u[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[0_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
-        (*dst).v[1_i32 as usize] = 0xffffffffffffffff_u64 as uint16_t;
+        (*dst).u[0_i32 as usize] = BAD as u16;
+        (*dst).u[1_i32 as usize] = BAD as u16;
+        (*dst).v[0_i32 as usize] = BAD as u16;
+        (*dst).v[1_i32 as usize] = BAD as u16;
         return;
     }
     let mut cur: TDivisor = *src;
@@ -799,7 +797,7 @@ unsafe fn divisor_mul128(
         divisor_add(&cur, &cur, &mut cur);
         mult_lo >>= 1_i32;
         if mult_hi & 1_i32 as u64 != 0 {
-            mult_lo = (mult_lo | 1_u64 << 63_i32) as uint64_t;
+            mult_lo |= 1_u64 << 63_i32;
         }
         mult_hi >>= 1_i32;
     }
@@ -807,7 +805,7 @@ unsafe fn divisor_mul128(
     loop {
         mult_lo >>= 1_i32;
         if mult_hi & 1_i32 as u64 != 0 {
-            mult_lo = (mult_lo | 1_u64 << 63_i32) as uint64_t;
+            mult_lo |= 1_u64 << 63_i32;
        }
         mult_hi >>= 1_i32;
         if mult_lo == 0_i32 as u64 && mult_hi == 0_i32 as u64 {
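The two hunks above are the same edit applied to both halves of divisor_mul128's double-and-add loop: each iteration shifts the 128-bit multiplier held in (mult_hi, mult_lo) right by one bit, carrying the low bit of the high word into the top bit of the low word. That single step in isolation (a sketch; the committed code keeps it inline):

    fn shr128_by_1(hi: &mut u64, lo: &mut u64) {
        *lo >>= 1;
        if *hi & 1 != 0 {
            *lo |= 1_u64 << 63; // bit 64 of the 128-bit value becomes bit 63 of lo
        }
        *hi >>= 1;
    }

    fn main() {
        let (mut hi, mut lo) = (0b1_u64, 0_u64);
        shr128_by_1(&mut hi, &mut lo);
        assert_eq!((hi, lo), (0, 1_u64 << 63));
    }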
@@ -1150,9 +1148,9 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
         keybuf.as_mut_ptr() as *mut c_void,
         8,
     );
-    let mut productIdMixed: uint64_t = (productId1 as uint64_t) << 41_i32
-        | (productId2 as uint64_t) << 58_i32
-        | (productId3 as uint64_t) << 17_i32
+    let mut productIdMixed: u64 = (productId1 as u64) << 41_i32
+        | (productId2 as u64) << 58_i32
+        | (productId3 as u64) << 17_i32
         | productId4 as u64;
     ptr::copy_nonoverlapping(
         &mut productIdMixed as *mut u64 as *const c_void,
@@ -1166,8 +1164,8 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
     let mut attempt = 0_i32 as u8;
     while attempt as i32 <= 0x80_i32 {
         let mut u: C2RustUnnamed_3 = C2RustUnnamed_3 { buffer: [0; 14] };
-        u.c2rust_unnamed.lo = 0_i32 as uint64_t;
-        u.c2rust_unnamed.hi = 0_i32 as uint64_t;
+        u.c2rust_unnamed.lo = 0_i32 as u64;
+        u.c2rust_unnamed.hi = 0_i32 as u64;
         u.buffer[7_i32 as usize] = attempt;
         Mix(
             (u.buffer).as_mut_ptr(),
@@ -1175,17 +1173,14 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
             keybuf.as_mut_ptr(),
             16_i32 as size_t,
         );
-        let mut x2: uint64_t = ui128_quotient_mod(u.c2rust_unnamed.lo, u.c2rust_unnamed.hi);
-        let x1: uint64_t =
-            u.c2rust_unnamed
-                .lo
-                .wrapping_sub(x2.wrapping_mul(0x16a6b036d7f2a79_u64)) as uint64_t;
+        let mut x2: u64 = ui128_quotient_mod(u.c2rust_unnamed.lo, u.c2rust_unnamed.hi);
+        let x1: u64 = u.c2rust_unnamed.lo.wrapping_sub(x2.wrapping_mul(MOD));
         x2 = x2.wrapping_add(1);
         d_0.u[0_i32 as usize] = residue_sub(
             residue_mul(x1, x1),
-            residue_mul(43_i32 as uint64_t, residue_mul(x2, x2)),
-        ) as uint16_t;
-        d_0.u[1_i32 as usize] = residue_add(x1, x1) as uint16_t;
+            residue_mul(43_i32 as u64, residue_mul(x2, x2)),
+        ) as u16;
+        d_0.u[1_i32 as usize] = residue_add(x1, x1) as u16;
         if find_divisor_v(&mut d_0) != 0 {
             break;
         }
@@ -1196,8 +1191,8 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
     }
     divisor_mul128(
         &d_0,
-        0x4e21b9d10f127c1_i64 as uint64_t,
-        0x40da7c36d44c_i64 as uint64_t,
+        0x4e21b9d10f127c1_i64 as u64,
+        0x40da7c36d44c_i64 as u64,
         &mut d_0,
     );
     let mut e: C2RustUnnamed_0 = C2RustUnnamed_0 {
@@ -1206,63 +1201,56 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
             encoded_hi: 0,
         },
     };
-    if d_0.u[0_i32 as usize] as u64 == 0xffffffffffffffff_u64 {
+    if d_0.u[0_i32 as usize] as u64 == BAD {
         // we can not get the zero divisor, actually...
         e.c2rust_unnamed.encoded_lo = umul128(
-            0x16a6b036d7f2a79_u64.wrapping_add(2_i32 as u64) as uint64_t,
-            0x16a6b036d7f2a79_u64 as uint64_t,
+            MOD.wrapping_add(2_i32 as u64),
+            MOD,
             &mut e.c2rust_unnamed.encoded_hi,
         );
-    } else if d_0.u[1_i32 as usize] as u64 == 0xffffffffffffffff_u64 {
+    } else if d_0.u[1_i32 as usize] as u64 == BAD {
         e.c2rust_unnamed.encoded_lo = umul128(
-            0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64) as uint64_t,
-            d_0.u[0_i32 as usize] as uint64_t,
+            MOD.wrapping_add(1_i32 as u64),
+            d_0.u[0_i32 as usize] as u64,
             &mut e.c2rust_unnamed.encoded_hi,
         );
-        e.c2rust_unnamed.encoded_lo =
-            e.c2rust_unnamed
-                .encoded_lo
-                .wrapping_add(0x16a6b036d7f2a79_u64) as uint64_t as uint64_t;
+        e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(MOD);
         e.c2rust_unnamed.encoded_hi = e
             .c2rust_unnamed
             .encoded_hi
-            .wrapping_add((e.c2rust_unnamed.encoded_lo < 0x16a6b036d7f2a79_u64) as i32 as u64)
-            as uint64_t as uint64_t;
+            .wrapping_add((e.c2rust_unnamed.encoded_lo < MOD) as i32 as u64);
     } else {
-        let x1_0: uint64_t = (if d_0.u[1_i32 as usize] as i32 % 2_i32 != 0 {
-            (d_0.u[1_i32 as usize] as u64).wrapping_add(0x16a6b036d7f2a79_u64)
+        let x1_0: u64 = (if d_0.u[1_i32 as usize] as i32 % 2_i32 != 0 {
+            (d_0.u[1_i32 as usize] as u64).wrapping_add(MOD)
         } else {
             d_0.u[1_i32 as usize] as u64
         })
-        .wrapping_div(2_i32 as u64) as uint64_t;
-        let x2sqr: uint64_t =
-            residue_sub(residue_mul(x1_0, x1_0), d_0.u[0_i32 as usize] as uint64_t);
-        let mut x2_0: uint64_t = residue_sqrt(x2sqr);
-        if x2_0 == 0xffffffffffffffff_u64 {
-            x2_0 = residue_sqrt(residue_mul(x2sqr, residue_inv(43_i32 as uint64_t)));
+        .wrapping_div(2_i32 as u64);
+        let x2sqr: u64 = residue_sub(residue_mul(x1_0, x1_0), d_0.u[0_i32 as usize] as u64);
+        let mut x2_0: u64 = residue_sqrt(x2sqr);
+        if x2_0 == BAD {
+            x2_0 = residue_sqrt(residue_mul(x2sqr, residue_inv(43_i32 as u64)));
             e.c2rust_unnamed.encoded_lo = umul128(
-                0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64) as uint64_t,
-                0x16a6b036d7f2a79_u64.wrapping_add(x2_0) as uint64_t,
+                MOD.wrapping_add(1_i32 as u64),
+                MOD.wrapping_add(x2_0),
                 &mut e.c2rust_unnamed.encoded_hi,
             );
-            e.c2rust_unnamed.encoded_lo =
-                e.c2rust_unnamed.encoded_lo.wrapping_add(x1_0) as uint64_t as uint64_t;
+            e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(x1_0);
            e.c2rust_unnamed.encoded_hi = e
                 .c2rust_unnamed
                 .encoded_hi
-                .wrapping_add((e.c2rust_unnamed.encoded_lo < x1_0) as i32 as u64)
-                as uint64_t as uint64_t;
+                .wrapping_add((e.c2rust_unnamed.encoded_lo < x1_0) as i32 as u64);
         } else {
             // points (-x1+x2, v(-x1+x2)) and (-x1-x2, v(-x1-x2))
-            let mut x1a: uint64_t = residue_sub(x1_0, x2_0);
-            let y1: uint64_t = residue_sub(
-                d_0.v[0_i32 as usize] as uint64_t,
-                residue_mul(d_0.v[1_i32 as usize] as uint64_t, x1a),
+            let mut x1a: u64 = residue_sub(x1_0, x2_0);
+            let y1: u64 = residue_sub(
+                d_0.v[0_i32 as usize] as u64,
+                residue_mul(d_0.v[1_i32 as usize] as u64, x1a),
             );
-            let mut x2a: uint64_t = residue_add(x1_0, x2_0);
-            let y2: uint64_t = residue_sub(
-                d_0.v[0_i32 as usize] as uint64_t,
-                residue_mul(d_0.v[1_i32 as usize] as uint64_t, x2a),
+            let mut x2a: u64 = residue_add(x1_0, x2_0);
+            let y2: u64 = residue_sub(
+                d_0.v[0_i32 as usize] as u64,
+                residue_mul(d_0.v[1_i32 as usize] as u64, x2a),
             );
             if x1a > x2a {
                 std::mem::swap(&mut x1a, &mut x2a);
@@ -1271,44 +1259,38 @@ unsafe fn Generate(installation_id_str: *const i8, confirmation_id: *mut i8) ->
                 std::mem::swap(&mut x1a, &mut x2a);
             }
             e.c2rust_unnamed.encoded_lo = umul128(
-                0x16a6b036d7f2a79_u64.wrapping_add(1_i32 as u64) as uint64_t,
+                MOD.wrapping_add(1_i32 as u64),
                 x1a,
                 &mut e.c2rust_unnamed.encoded_hi,
             );
-            e.c2rust_unnamed.encoded_lo =
-                e.c2rust_unnamed.encoded_lo.wrapping_add(x2a) as uint64_t as uint64_t;
+            e.c2rust_unnamed.encoded_lo = e.c2rust_unnamed.encoded_lo.wrapping_add(x2a);
             e.c2rust_unnamed.encoded_hi = e
                 .c2rust_unnamed
                 .encoded_hi
-                .wrapping_add((e.c2rust_unnamed.encoded_lo < x2a) as i32 as u64)
-                as uint64_t as uint64_t;
+                .wrapping_add((e.c2rust_unnamed.encoded_lo < x2a) as i32 as u64);
         }
     }
     let mut decimal: [u8; 35] = [0; 35];
     let mut i = 0_i32 as size_t;
     while i < 35_i32 as u64 {
         let c: u32 = (e.c2rust_unnamed_0.encoded[3_i32 as usize]).wrapping_rem(10_i32 as u32);
-        e.c2rust_unnamed_0.encoded[3_i32 as usize] = e.c2rust_unnamed_0.encoded[3_i32 as usize]
-            .wrapping_div(10_i32 as u32)
-            as uint32_t as uint32_t;
-        let c2: u32 = ((c as uint64_t) << 32_i32
-            | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
+        e.c2rust_unnamed_0.encoded[3_i32 as usize] =
+            e.c2rust_unnamed_0.encoded[3_i32 as usize].wrapping_div(10_i32 as u32);
+        let c2: u32 = ((c as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
         e.c2rust_unnamed_0.encoded[2_i32 as usize] =
-            ((c as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
-                .wrapping_div(10_i32 as u64) as uint32_t;
-        let c3: u32 = ((c2 as uint64_t) << 32_i32
-            | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
+            ((c as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[2_i32 as usize] as u64)
+                .wrapping_div(10_i32 as u64) as u32;
+        let c3: u32 = ((c2 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
         e.c2rust_unnamed_0.encoded[1_i32 as usize] =
-            ((c2 as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
-                .wrapping_div(10_i32 as u64) as uint32_t;
-        let c4: u32 = ((c3 as uint64_t) << 32_i32
-            | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
+            ((c2 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[1_i32 as usize] as u64)
                .wrapping_div(10_i32 as u64) as u32;
+        let c4: u32 = ((c3 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
            .wrapping_rem(10_i32 as u64) as u32;
         e.c2rust_unnamed_0.encoded[0_i32 as usize] =
-            ((c3 as uint64_t) << 32_i32 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
-                .wrapping_div(10_i32 as u64) as uint32_t;
+            ((c3 as u64) << 32_i32 | e.c2rust_unnamed_0.encoded[0_i32 as usize] as u64)
                .wrapping_div(10_i32 as u64) as u32;
         decimal[(34_i32 as u64).wrapping_sub(i) as usize] = c4 as u8;
         i = i.wrapping_add(1);
     }
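The digit loop above is long division of the 128-bit encoded value by 10 across four 32-bit limbs, emitting one decimal digit per pass into a 35-digit buffer. On current Rust the same extraction can be phrased with native u128 arithmetic (an equivalent sketch, not the committed code):

    fn to_35_digits(mut value: u128) -> [u8; 35] {
        let mut decimal = [0_u8; 35];
        for slot in decimal.iter_mut().rev() {
            *slot = (value % 10) as u8; // least significant digit first
            value /= 10;
        }
        decimal
    }

    fn main() {
        let digits = to_35_digits(1234567890);
        assert_eq!(&digits[25..], &[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]);
    }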