Skip to content

Commit

Permalink
Merge branch 'master' into jps/moar-threads
Browse files Browse the repository at this point in the history
  • Loading branch information
DilumAluthge authored Apr 26, 2022
2 parents c7ea709 + 3423639 commit ab78165
Show file tree
Hide file tree
Showing 6 changed files with 43 additions and 10 deletions.
1 change: 1 addition & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ Standard library changes
system image with other BLAS/LAPACK libraries is not
supported. Instead, it is recommended that the LBT mechanism be used
for swapping BLAS/LAPACK with vendor provided ones. ([#44360])
* `normalize(x, p=2)` now supports any normed vector space `x`, including scalars ([#44925]).

#### Markdown

Expand Down
18 changes: 14 additions & 4 deletions src/codegen.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1321,7 +1321,7 @@ class jl_codectx_t {
public:
IRBuilder<> builder;
jl_codegen_params_t &emission_context;
jl_codegen_call_targets_t &call_targets;
llvm::MapVector<jl_code_instance_t*, jl_codegen_call_target_t> call_targets;
std::map<void*, GlobalVariable*> &global_targets;
Function *f = NULL;
// local var info. globals are not in here.
Expand Down Expand Up @@ -1366,7 +1366,7 @@ class jl_codectx_t {
jl_codectx_t(LLVMContext &llvmctx, jl_codegen_params_t &params)
: builder(llvmctx),
emission_context(params),
call_targets(params.workqueue),
call_targets(),
global_targets(params.globals),
world(params.world),
use_cache(params.cache),
Expand All @@ -1384,6 +1384,9 @@ class jl_codectx_t {

~jl_codectx_t() {
assert(this->roots == NULL);
// Transfer local delayed calls to the global queue
for (auto call_target : call_targets)
emission_context.workqueue.push_back(call_target);
}
};

Expand Down Expand Up @@ -3911,6 +3914,11 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, const
}
}
}
auto it = ctx.call_targets.find(codeinst);
if (need_to_emit && it != ctx.call_targets.end()) {
protoname = std::get<2>(it->second)->getName();
need_to_emit = false;
}
if (need_to_emit) {
raw_string_ostream(name) << (specsig ? "j_" : "j1_") << name_from_method_instance(mi) << "_" << globalUniqueGeneratedNames++;
protoname = StringRef(name);
Expand All @@ -3924,7 +3932,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, const
handled = true;
if (need_to_emit) {
Function *trampoline_decl = cast<Function>(jl_Module->getNamedValue(protoname));
ctx.call_targets.push_back(std::make_tuple(codeinst, cc, return_roots, trampoline_decl, specsig));
ctx.call_targets[codeinst] = std::make_tuple(cc, return_roots, trampoline_decl, specsig);
}
}
}
Expand Down Expand Up @@ -8097,7 +8105,9 @@ void jl_compile_workqueue(
jl_returninfo_t::CallingConv proto_cc;
bool proto_specsig;
unsigned proto_return_roots;
std::tie(codeinst, proto_cc, proto_return_roots, protodecl, proto_specsig) = params.workqueue.back();
auto it = params.workqueue.back();
codeinst = it.first;
std::tie(proto_cc, proto_return_roots, protodecl, proto_specsig) = it.second;
params.workqueue.pop_back();
// try to emit code for this item from the workqueue
assert(codeinst->min_world <= params.world && codeinst->max_world >= params.world &&
Expand Down
1 change: 1 addition & 0 deletions src/disasm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@
// for outputting disassembly
#include <llvm/ADT/Triple.h>
#include <llvm/AsmParser/Parser.h>
#include <llvm/Analysis/TargetTransformInfo.h>
#include <llvm/BinaryFormat/COFF.h>
#include <llvm/BinaryFormat/MachO.h>
#include <llvm/DebugInfo/DIContext.h>
Expand Down
6 changes: 4 additions & 2 deletions src/jitlayers.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
// This file is a part of Julia. License is MIT: https://julialang.org/license

#include <llvm/ADT/MapVector.h>

#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/Constants.h>
#include <llvm/IR/Module.h>
Expand Down Expand Up @@ -117,14 +119,14 @@ struct jl_llvmf_dump_t {
Function *F;
};

typedef std::vector<std::tuple<jl_code_instance_t*, jl_returninfo_t::CallingConv, unsigned, llvm::Function*, bool>> jl_codegen_call_targets_t;
typedef std::tuple<jl_returninfo_t::CallingConv, unsigned, llvm::Function*, bool> jl_codegen_call_target_t;

typedef struct _jl_codegen_params_t {
orc::ThreadSafeContext tsctx;
orc::ThreadSafeContext::Lock tsctx_lock;
typedef StringMap<GlobalVariable*> SymMapGV;
// outputs
jl_codegen_call_targets_t workqueue;
std::vector<std::pair<jl_code_instance_t*, jl_codegen_call_target_t>> workqueue;
std::map<void*, GlobalVariable*> globals;
std::map<jl_datatype_t*, DIType*> ditypes;
std::map<jl_datatype_t*, Type*> llvmtypes;
Expand Down
20 changes: 16 additions & 4 deletions stdlib/LinearAlgebra/src/generic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -1790,11 +1790,12 @@ end
end

"""
normalize(a::AbstractArray, p::Real=2)
normalize(a, p::Real=2)
Normalize the array `a` so that its `p`-norm equals unity,
i.e. `norm(a, p) == 1`.
See also [`normalize!`](@ref) and [`norm`](@ref).
Normalize `a` so that its `p`-norm equals unity,
i.e. `norm(a, p) == 1`. For scalars, this is similar to `sign(a)`,
except that `normalize(0)` yields `NaN`.
See also [`normalize!`](@ref), [`norm`](@ref), and [`sign`](@ref).
# Examples
```jldoctest
Expand Down Expand Up @@ -1831,6 +1832,14 @@ julia> normalize(a)
0.154303 0.308607 0.617213
0.154303 0.308607 0.617213
julia> normalize(3, 1)
1.0
julia> normalize(-8, 1)
-1.0
julia> normalize(0, 1)
NaN
```
"""
function normalize(a::AbstractArray, p::Real = 2)
Expand All @@ -1843,3 +1852,6 @@ function normalize(a::AbstractArray, p::Real = 2)
return T[]
end
end

function normalize(x)
    # Rescale `x` to unit norm in its normed vector space (scalars included).
    # For x == 0 this is 0/0, which yields NaN rather than throwing.
    return x / norm(x)
end

function normalize(x, p::Real)
    # Same as above, but measured with the `p`-norm.
    return x / norm(x, p)
end
7 changes: 7 additions & 0 deletions stdlib/LinearAlgebra/test/generic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -375,6 +375,13 @@ end
@test typeof(normalize([1 2 3; 4 5 6])) == Array{Float64,2}
end

@testset "normalize for scalars" begin
    # A nonzero scalar normalizes to its sign (independent of p, since the
    # p-norm of a scalar is its absolute value).
    for (x, want) in ((8.0, 1.0), (-3.0, -1.0))
        @test normalize(x) == want
    end
    @test normalize(-3.0, 1) == -1.0
    # Zero has no direction: 0 / norm(0) is NaN.
    @test isnan(normalize(0.0))
end

@testset "Issue #30466" begin
@test norm([typemin(Int), typemin(Int)], Inf) == -float(typemin(Int))
@test norm([typemin(Int), typemin(Int)], 1) == -2float(typemin(Int))
Expand Down

0 comments on commit ab78165

Please sign in to comment.