Remove hand-coded methods for (U)Int128 on 32-bit systems #53867

Merged 2 commits on Apr 16, 2024.
164 changes: 6 additions & 158 deletions base/int.jl
@@ -843,166 +843,14 @@ widemul(x::Bool,y::Number) = x * y
widemul(x::Number,y::Bool) = x * y


-## wide multiplication, Int128 multiply and divide ##

-if Core.sizeof(Int) == 4
-    function widemul(u::Int64, v::Int64)
-        local u0::UInt64, v0::UInt64, w0::UInt64
-        local u1::Int64, v1::Int64, w1::UInt64, w2::Int64, t::UInt64

-        u0 = u & 0xffffffff; u1 = u >> 32
-        v0 = v & 0xffffffff; v1 = v >> 32
-        w0 = u0 * v0
-        t = reinterpret(UInt64, u1) * v0 + (w0 >>> 32)
-        w2 = reinterpret(Int64, t) >> 32
-        w1 = u0 * reinterpret(UInt64, v1) + (t & 0xffffffff)
-        hi = u1 * v1 + w2 + (reinterpret(Int64, w1) >> 32)
-        lo = w0 & 0xffffffff + (w1 << 32)
-        return Int128(hi) << 64 + Int128(lo)
-    end

-    function widemul(u::UInt64, v::UInt64)
-        local u0::UInt64, v0::UInt64, w0::UInt64
-        local u1::UInt64, v1::UInt64, w1::UInt64, w2::UInt64, t::UInt64

-        u0 = u & 0xffffffff; u1 = u >>> 32
-        v0 = v & 0xffffffff; v1 = v >>> 32
-        w0 = u0 * v0
-        t = u1 * v0 + (w0 >>> 32)
-        w2 = t >>> 32
-        w1 = u0 * v1 + (t & 0xffffffff)
-        hi = u1 * v1 + w2 + (w1 >>> 32)
-        lo = w0 & 0xffffffff + (w1 << 32)
-        return UInt128(hi) << 64 + UInt128(lo)
-    end
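The two widemul methods being removed above implement schoolbook 64x64->128 multiplication on 32-bit limbs: each operand is split at bit 32, the partial products are accumulated, and carries are threaded through t, w1, and w2. A minimal property check of the unsigned variant, not part of the PR, with the algorithm copied under the hypothetical name widemul32 so it does not clash with Base.widemul on a 64-bit host:

    # hypothetical spot check: the limb algorithm must agree with
    # multiplication after widening both operands to UInt128
    using Random
    function widemul32(u::UInt64, v::UInt64)
        u0 = u & 0xffffffff; u1 = u >>> 32
        v0 = v & 0xffffffff; v1 = v >>> 32
        w0 = u0 * v0                       # low x low
        t = u1 * v0 + (w0 >>> 32)          # high x low, plus carry
        w1 = u0 * v1 + (t & 0xffffffff)    # low x high, plus carry
        hi = u1 * v1 + (t >>> 32) + (w1 >>> 32)
        lo = (w0 & 0xffffffff) + (w1 << 32)
        return UInt128(hi) << 64 + UInt128(lo)
    end
    for _ in 1:10_000
        u, v = rand(UInt64), rand(UInt64)
        @assert widemul32(u, v) == UInt128(u) * UInt128(v)
    end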

-    function *(u::Int128, v::Int128)
-        u0 = u % UInt64; u1 = Int64(u >> 64)
-        v0 = v % UInt64; v1 = Int64(v >> 64)
-        lolo = widemul(u0, v0)
-        lohi = widemul(reinterpret(Int64, u0), v1)
-        hilo = widemul(u1, reinterpret(Int64, v0))
-        t = reinterpret(UInt128, hilo) + (lolo >>> 64)
-        w1 = reinterpret(UInt128, lohi) + (t & 0xffffffffffffffff)
-        return Int128(lolo & 0xffffffffffffffff) + reinterpret(Int128, w1) << 64
-    end

-    function *(u::UInt128, v::UInt128)
-        u0 = u % UInt64; u1 = UInt64(u >>> 64)
-        v0 = v % UInt64; v1 = UInt64(v >>> 64)
-        lolo = widemul(u0, v0)
-        lohi = widemul(u0, v1)
-        hilo = widemul(u1, v0)
-        t = hilo + (lolo >>> 64)
-        w1 = lohi + (t & 0xffffffffffffffff)
-        return (lolo & 0xffffffffffffffff) + UInt128(w1) << 64
-    end
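Note that these removed 128-bit multiplies are assembled from only three 64-bit-limb partial products; the u1 * v1 term is dropped because it contributes only at bit 128 and above, which wraps away modulo 2^128. A hypothetical check of that wrap-around contract, not part of the PR, against BigInt arithmetic:

    # the native (or hand-coded) product must equal the full product mod 2^128
    using Random
    for _ in 1:1_000
        u, v = rand(UInt128), rand(UInt128)
        @assert u * v == UInt128(mod(BigInt(u) * BigInt(v), BigInt(2)^128))
    end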

-    function _setbit(x::UInt128, i)
-        # faster version of `return x | (UInt128(1) << i)`
-        j = i >> 5
-        y = UInt128(one(UInt32) << (i & 0x1f))
-        if j == 0
-            return x | y
-        elseif j == 1
-            return x | (y << 32)
-        elseif j == 2
-            return x | (y << 64)
-        elseif j == 3
-            return x | (y << 96)
-        end
-        return x
-    end
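_setbit exists because a variable 128-bit shift was expensive on 32-bit targets: it shifts a 32-bit one within a limb, then moves it by a constant multiple of 32. With the definition above pasted into a session, a hypothetical exhaustive equivalence check (not part of the PR):

    using Random
    for i in 0:127
        x = rand(UInt128)
        @assert _setbit(x, i) == x | (UInt128(1) << i)
    end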
+# Int128 multiply and divide
+*(x::T, y::T) where {T<:Union{Int128,UInt128}} = mul_int(x, y)
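mul_int is the same compiler intrinsic the 64-bit build already used, so multiplication no longer needs a Julia-level fallback; on 32-bit targets LLVM legalizes the i128 operation itself, either inline or through its runtime support library. One way to inspect the lowering, a sketch assuming the InteractiveUtils stdlib is available:

    using InteractiveUtils
    # on a 64-bit host this prints IR containing a single `mul i128`
    code_llvm(*, (Int128, Int128))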

-    function divrem(x::UInt128, y::UInt128)
-        iszero(y) && throw(DivideError())
-        if (x >> 64) % UInt64 == 0
-            if (y >> 64) % UInt64 == 0
-                # fast path: upper 64 bits are zero, so we can fall back to UInt64 division
-                q64, x64 = divrem(x % UInt64, y % UInt64)
-                return UInt128(q64), UInt128(x64)
-            else
-                # this implies y > x, so
-                return zero(UInt128), x
-            end
-        end
-        n = leading_zeros(y) - leading_zeros(x)
-        q = zero(UInt128)
-        ys = y << n
-        while n >= 0
-            # ys == y * 2^n
-            if ys <= x
-                x -= ys
-                q = _setbit(q, n)
-                if (x >> 64) % UInt64 == 0
-                    # exit early, similar to above fast path
-                    if (y >> 64) % UInt64 == 0
-                        q64, x64 = divrem(x % UInt64, y % UInt64)
-                        q |= q64
-                        x = UInt128(x64)
-                    end
-                    return q, x
-                end
-            end
-            ys >>>= 1
-            n -= 1
-        end
-        return q, x
-    end
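The removed loop above is classic shift-subtract (restoring) long division: ys starts at y scaled so its top bit aligns with x's, and each iteration either subtracts and records bit n of the quotient or just shifts right, maintaining the invariant x_original == q * y + x. A hypothetical property check of that contract, not part of the PR; on a 64-bit host divrem already routes to the intrinsics, so this exercises the same contract the removed loop had to satisfy:

    using Random
    for _ in 1:1_000
        x = rand(UInt128)
        y = rand(UInt128) >> rand(0:127)   # vary the divisor's magnitude
        iszero(y) && continue
        q, r = divrem(x, y)
        @assert x == q * y + r && r < y
        @assert q == UInt128(BigInt(x) ÷ BigInt(y))
    end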
+div(x::Int128, y::Int128) = checked_sdiv_int(x, y)
+div(x::UInt128, y::UInt128) = checked_udiv_int(x, y)

-    function div(x::Int128, y::Int128)
-        (x == typemin(Int128)) & (y == -1) && throw(DivideError())
-        return Int128(div(BigInt(x), BigInt(y)))::Int128
-    end
-    div(x::UInt128, y::UInt128) = divrem(x, y)[1]

-    function rem(x::Int128, y::Int128)
-        return Int128(rem(BigInt(x), BigInt(y)))::Int128
-    end

-    function rem(x::UInt128, y::UInt128)
-        iszero(y) && throw(DivideError())
-        if (x >> 64) % UInt64 == 0
-            if (y >> 64) % UInt64 == 0
-                # fast path: upper 64 bits are zero, so we can fall back to UInt64 division
-                return UInt128(rem(x % UInt64, y % UInt64))
-            else
-                # this implies y > x, so
-                return x
-            end
-        end
-        n = leading_zeros(y) - leading_zeros(x)
-        ys = y << n
-        while n >= 0
-            # ys == y * 2^n
-            if ys <= x
-                x -= ys
-                if (x >> 64) % UInt64 == 0
-                    # exit early, similar to above fast path
-                    if (y >> 64) % UInt64 == 0
-                        x = UInt128(rem(x % UInt64, y % UInt64))
-                    end
-                    return x
-                end
-            end
-            ys >>>= 1
-            n -= 1
-        end
-        return x
-    end

-    function mod(x::Int128, y::Int128)
-        return Int128(mod(BigInt(x), BigInt(y)))::Int128
-    end
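The removed signed cases round-trip through BigInt because the shift-subtract loop is written for unsigned operands only. A hypothetical spot check, not part of the PR, that the BigInt detour matches native semantics: truncating division, remainder with the dividend's sign, mod with the divisor's sign:

    for (x, y) in ((Int128(-7), Int128(2)), (Int128(7), Int128(-2)))
        @assert div(x, y) == Int128(div(BigInt(x), BigInt(y)))  # -3 in both cases
        @assert rem(x, y) == Int128(rem(BigInt(x), BigInt(y)))  # sign follows x
        @assert mod(x, y) == Int128(mod(BigInt(x), BigInt(y)))  # sign follows y
    end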
-else
-    *(x::T, y::T) where {T<:Union{Int128,UInt128}} = mul_int(x, y)

-    div(x::Int128, y::Int128) = checked_sdiv_int(x, y)
-    div(x::UInt128, y::UInt128) = checked_udiv_int(x, y)

-    rem(x::Int128, y::Int128) = checked_srem_int(x, y)
-    rem(x::UInt128, y::UInt128) = checked_urem_int(x, y)
-end
+rem(x::Int128, y::Int128) = checked_srem_int(x, y)
+rem(x::UInt128, y::UInt128) = checked_urem_int(x, y)
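The checked_* intrinsics preserve the error behavior the removed code implemented by hand: DivideError on a zero divisor, and on the one signed overflow case, typemin(Int128) divided by -1. A hypothetical sanity check, not part of the PR, using a throwaway helper named throws_divide_error:

    function throws_divide_error(f, x, y)
        try
            f(x, y)
            return false
        catch e
            return e isa DivideError
        end
    end
    @assert throws_divide_error(div, UInt128(1), UInt128(0))
    @assert throws_divide_error(rem, UInt128(1), UInt128(0))
    @assert throws_divide_error(div, typemin(Int128), Int128(-1))  # -typemin not representable
    @assert rem(typemin(Int128), Int128(-1)) == 0                  # this case is defined
    @assert div(Int128(-7), Int128(2)) == Int128(-3)               # truncation toward zero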

# issue #15489: since integer ops are unchecked, they shouldn't check promotion
for op in (:+, :-, :*, :&, :|, :xor)