intrinsic casts: perform error checking (JuliaLang#29422)
Previously, we just let LLVM abort (even though the invalid cast could occur in dead code).
vtjnash authored and Keno committed Sep 29, 2018
1 parent 8907aff commit f7f87c2
Showing 4 changed files with 59 additions and 76 deletions.
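
What the change looks like from Julia (a hypothetical REPL session; the error string comes from the new tests in test/intrinsics.jl below). An intrinsic cast with ill-sized operands now raises an ErrorException instead of tripping an assertion, or an abort, inside LLVM:

    julia> Core.Intrinsics.trunc_int(Int16, 0x00)  # output (16 bits) is not < input (8 bits)
    ERROR: Trunc: output bitsize must be < input bitsize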
src/APInt-C.cpp: 9 changes (6 additions & 3 deletions)
@@ -412,7 +412,8 @@ void LLVMUItoFP(unsigned numbits, integerPart *pa, unsigned onumbits, integerPart *pr)
 
 extern "C" JL_DLLEXPORT
 void LLVMSExt(unsigned inumbits, integerPart *pa, unsigned onumbits, integerPart *pr) {
-    assert(inumbits < onumbits);
+    if (!(onumbits > inumbits))
+        jl_error("SExt: output bitsize must be > input bitsize");
     unsigned inumbytes = RoundUpToAlignment(inumbits, host_char_bit) / host_char_bit;
     unsigned onumbytes = RoundUpToAlignment(onumbits, host_char_bit) / host_char_bit;
     int bits = (0 - inumbits) % host_char_bit;
@@ -430,7 +431,8 @@ void LLVMSExt(unsigned inumbits, integerPart *pa, unsigned onumbits, integerPart *pr)
 
 extern "C" JL_DLLEXPORT
 void LLVMZExt(unsigned inumbits, integerPart *pa, unsigned onumbits, integerPart *pr) {
-    assert(inumbits < onumbits);
+    if (!(onumbits > inumbits))
+        jl_error("ZExt: output bitsize must be > input bitsize");
     unsigned inumbytes = RoundUpToAlignment(inumbits, host_char_bit) / host_char_bit;
     unsigned onumbytes = RoundUpToAlignment(onumbits, host_char_bit) / host_char_bit;
     int bits = (0 - inumbits) % host_char_bit;
@@ -446,7 +448,8 @@ void LLVMZExt(unsigned inumbits, integerPart *pa, unsigned onumbits, integerPart *pr)
 
 extern "C" JL_DLLEXPORT
 void LLVMTrunc(unsigned inumbits, integerPart *pa, unsigned onumbits, integerPart *pr) {
-    assert(inumbits > onumbits);
+    if (!(onumbits < inumbits))
+        jl_error("Trunc: output bitsize must be < input bitsize");
     unsigned onumbytes = RoundUpToAlignment(onumbits, host_char_bit) / host_char_bit;
     memcpy(pr, pa, onumbytes);
 }
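These APInt-C.cpp routines implement the runtime (non-codegen) fallback for the integer-cast intrinsics, so the new guards surface directly as Julia errors. A minimal sketch using cases from the tests added below:

    Core.Intrinsics.zext_int(Int8, 0x00)    # ERROR: ZExt: output bitsize must be > input bitsize (8 is not > 8)
    Core.Intrinsics.sext_int(Int8, 0x0000)  # ERROR: SExt: output bitsize must be > input bitsize (8 is not > 16)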
src/intrinsics.cpp: 91 changes (25 additions & 66 deletions)
@@ -502,7 +502,7 @@ static jl_cgval_t generic_bitcast(jl_codectx_t &ctx, const jl_cgval_t *argv)
 
 static jl_cgval_t generic_cast(
         jl_codectx_t &ctx,
-        intrinsic f, Value *(*generic)(jl_codectx_t&, Type*, Value*),
+        intrinsic f, Instruction::CastOps Op,
         const jl_cgval_t *argv, bool toint, bool fromint)
 {
     const jl_cgval_t &targ = argv[0];
@@ -523,64 +523,23 @@ static jl_cgval_t generic_cast(
     if (!to || !vt)
         return emit_runtime_call(ctx, f, argv, 2);
     Value *from = emit_unbox(ctx, vt, v, v.typ);
-    Value *ans = generic(ctx, to, from);
-    return mark_julia_type(ctx, ans, false, jlto);
-}
-
-static Value *generic_trunc(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateTrunc(x, to);
-}
-
-static Value *generic_sext(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateSExt(x, to);
-}
-
-static Value *generic_zext(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateZExt(x, to);
-}
-
-static Value *generic_uitofp(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateUIToFP(x, to);
-}
-
-static Value *generic_sitofp(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateSIToFP(x, to);
-}
-
-static Value *generic_fptoui(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateFPToUI(x, to);
-}
-
-static Value *generic_fptosi(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateFPToSI(x, to);
-}
-
-static Value *generic_fptrunc(jl_codectx_t &ctx, Type *to, Value *x)
-{
-    return ctx.builder.CreateFPTrunc(x, to);
-}
-
-static Value *generic_fpext(jl_codectx_t &ctx, Type *to, Value *x)
-{
+    if (!CastInst::castIsValid(Op, from, to))
+        return emit_runtime_call(ctx, f, argv, 2);
+    if (Op == Instruction::FPExt) {
 #ifdef JL_NEED_FLOATTEMP_VAR
-    // Target platform might carry extra precision.
-    // Force rounding to single precision first. The reason is that it's
-    // fine to keep working in extended precision as long as it's
-    // understood that everything is implicitly rounded to 23 bits,
-    // but if we start looking at more bits we need to actually do the
-    // rounding first instead of carrying around incorrect low bits.
-    Value *jlfloattemp_var = emit_static_alloca(ctx, x->getType());
-    ctx.builder.CreateStore(x, jlfloattemp_var);
-    x = ctx.builder.CreateLoad(jlfloattemp_var, true);
+        // Target platform might carry extra precision.
+        // Force rounding to single precision first. The reason is that it's
+        // fine to keep working in extended precision as long as it's
+        // understood that everything is implicitly rounded to 23 bits,
+        // but if we start looking at more bits we need to actually do the
+        // rounding first instead of carrying around incorrect low bits.
+        Value *jlfloattemp_var = emit_static_alloca(ctx, from->getType());
+        ctx.builder.CreateStore(from, jlfloattemp_var);
+        from = ctx.builder.CreateLoad(jlfloattemp_var, /*force this to load from the stack*/true);
 #endif
-    return ctx.builder.CreateFPExt(x, to);
+    }
+    Value *ans = ctx.builder.CreateCast(Op, from, to);
+    return mark_julia_type(ctx, ans, false, jlto);
 }
 
 static jl_cgval_t emit_runtime_pointerref(jl_codectx_t &ctx, jl_cgval_t *argv)
@@ -924,23 +883,23 @@ static jl_cgval_t emit_intrinsic(jl_codectx_t &ctx, intrinsic f, jl_value_t **args, size_t nargs)
     case bitcast:
         return generic_bitcast(ctx, argv);
     case trunc_int:
-        return generic_cast(ctx, f, generic_trunc, argv, true, true);
+        return generic_cast(ctx, f, Instruction::Trunc, argv, true, true);
     case sext_int:
-        return generic_cast(ctx, f, generic_sext, argv, true, true);
+        return generic_cast(ctx, f, Instruction::SExt, argv, true, true);
     case zext_int:
-        return generic_cast(ctx, f, generic_zext, argv, true, true);
+        return generic_cast(ctx, f, Instruction::ZExt, argv, true, true);
     case uitofp:
-        return generic_cast(ctx, f, generic_uitofp, argv, false, true);
+        return generic_cast(ctx, f, Instruction::UIToFP, argv, false, true);
     case sitofp:
-        return generic_cast(ctx, f, generic_sitofp, argv, false, true);
+        return generic_cast(ctx, f, Instruction::SIToFP, argv, false, true);
     case fptoui:
-        return generic_cast(ctx, f, generic_fptoui, argv, true, false);
+        return generic_cast(ctx, f, Instruction::FPToUI, argv, true, false);
     case fptosi:
-        return generic_cast(ctx, f, generic_fptosi, argv, true, false);
+        return generic_cast(ctx, f, Instruction::FPToSI, argv, true, false);
     case fptrunc:
-        return generic_cast(ctx, f, generic_fptrunc, argv, false, false);
+        return generic_cast(ctx, f, Instruction::FPTrunc, argv, false, false);
     case fpext:
-        return generic_cast(ctx, f, generic_fpext, argv, false, false);
+        return generic_cast(ctx, f, Instruction::FPExt, argv, false, false);
 
     case not_int: {
         const jl_cgval_t &x = argv[0];
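The CastInst::castIsValid() check is what addresses the dead-code concern in the commit message: an ill-typed cast now compiles to a call to the runtime intrinsic (which raises the errors above) rather than to an invalid LLVM instruction, so merely compiling unreachable bad-cast code can no longer abort LLVM. A sketch of the new behavior (assumed semantics; the error string is from the tests below):

    dead(x) = x ? Core.Intrinsics.zext_int(Int8, 0x00) : 0x00  # zext to the same 8-bit width is invalid
    dead(false)  # compiles and runs fine: the bad cast becomes a runtime call that is never executed
    dead(true)   # ERROR: ZExt: output bitsize must be > input bitsize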
src/runtime_intrinsics.c: 19 changes (15 additions & 4 deletions)
@@ -834,15 +834,26 @@ cvt_iintrinsic(LLVMUItoFP, uitofp)
 cvt_iintrinsic(LLVMFPtoSI, fptosi)
 cvt_iintrinsic(LLVMFPtoUI, fptoui)
 
-#define fpcvt(pr, a) \
+#define fptrunc(pr, a) \
+    if (!(osize < 8 * sizeof(a))) \
+        jl_error("fptrunc: output bitsize must be < input bitsize"); \
     if (osize == 32) \
         *(float*)pr = a; \
     else if (osize == 64) \
         *(double*)pr = a; \
     else \
-        jl_error("fptrunc/fpext: runtime floating point intrinsics are not implemented for bit sizes other than 32 and 64");
-un_fintrinsic_withtype(fpcvt,fptrunc)
-un_fintrinsic_withtype(fpcvt,fpext)
+        jl_error("fptrunc: runtime floating point intrinsics are not implemented for bit sizes other than 32 and 64");
+#define fpext(pr, a) \
+    if (!(osize > 8 * sizeof(a))) \
+        jl_error("fpext: output bitsize must be > input bitsize"); \
+    if (osize == 32) \
+        *(float*)pr = a; \
+    else if (osize == 64) \
+        *(double*)pr = a; \
+    else \
+        jl_error("fpext: runtime floating point intrinsics are not implemented for bit sizes other than 32 and 64");
+un_fintrinsic_withtype(fptrunc,fptrunc)
+un_fintrinsic_withtype(fpext,fpext)
 
 // checked arithmetic
 #define check_sadd_int(a,b) \
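For well-sized 32/64-bit operands the runtime conversions behave as before; the new guards only reject ill-sized requests up front. A quick sketch (the error case mirrors the tests below):

    Core.Intrinsics.fpext(Float64, 1.0f0)        # 1.0 (valid: 64 > 32)
    Core.Intrinsics.fptrunc(Float32, 1.0)        # 1.0f0 (valid: 32 < 64)
    Core.Intrinsics.fptrunc(Int32, 0x0000_0000)  # ERROR: fptrunc: output bitsize must be < input bitsize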
test/intrinsics.jl: 16 changes (13 additions & 3 deletions)
@@ -10,9 +10,19 @@ include("testenv.jl")
 @test isa(convert(Char, 65), Char)
 
 # runtime intrinsics
-let f = Any[Core.Intrinsics.add_int, Core.Intrinsics.sub_int]
-    @test f[1](1, 1) == 2
-    @test f[2](1, 1) == 0
+@testset "runtime intrinsics" begin
+    @test Core.Intrinsics.add_int(1, 1) == 2
+    @test Core.Intrinsics.sub_int(1, 1) == 0
+    @test_throws ErrorException("fpext: output bitsize must be > input bitsize") Core.Intrinsics.fpext(Int32, 0x0000_0000)
+    @test_throws ErrorException("fpext: output bitsize must be > input bitsize") Core.Intrinsics.fpext(Int32, 0x0000_0000_0000_0000)
+    @test_throws ErrorException("fptrunc: output bitsize must be < input bitsize") Core.Intrinsics.fptrunc(Int32, 0x0000_0000)
+    @test_throws ErrorException("fptrunc: output bitsize must be < input bitsize") Core.Intrinsics.fptrunc(Int64, 0x0000_0000)
+    @test_throws ErrorException("ZExt: output bitsize must be > input bitsize") Core.Intrinsics.zext_int(Int8, 0x00)
+    @test_throws ErrorException("SExt: output bitsize must be > input bitsize") Core.Intrinsics.sext_int(Int8, 0x00)
+    @test_throws ErrorException("ZExt: output bitsize must be > input bitsize") Core.Intrinsics.zext_int(Int8, 0x0000)
+    @test_throws ErrorException("SExt: output bitsize must be > input bitsize") Core.Intrinsics.sext_int(Int8, 0x0000)
+    @test_throws ErrorException("Trunc: output bitsize must be < input bitsize") Core.Intrinsics.trunc_int(Int8, 0x00)
+    @test_throws ErrorException("Trunc: output bitsize must be < input bitsize") Core.Intrinsics.trunc_int(Int16, 0x00)
 end

# issue #4581
