Merge pull request JuliaLang#13681 from JuliaLang/cjh/normalize
Implement normalize and normalize!
jiahao committed Oct 23, 2015
2 parents 4e81212 + 5df3ebd commit e0a8985
Showing 7 changed files with 162 additions and 8 deletions.
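For orientation, here is a minimal sketch of the API this commit introduces, with behavior inferred from the docstrings and tests in the diff below:

```julia
v = [3.0, 4.0]

w = normalize(v)      # out-of-place: returns [0.6, 0.8], v is untouched
normalize!(v)         # in-place: v itself becomes [0.6, 0.8]

u, r = qr([3.0, 4.0]) # polar decomposition of a vector:
                      # u == [0.6, 0.8] (unit direction), r == 5.0 (norm)
```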
24 changes: 17 additions & 7 deletions NEWS.md
@@ -45,18 +45,28 @@ Library improvements

* `cov` and `cor` don't use keyword arguments anymore and are therefore now type stable ([#13465]).

* New method for generic QR with column pivoting ([#13480]).
* Linear algebra:

* A new `SparseVector` type allows for one-dimensional sparse arrays. Slicing
and reshaping sparse matrices now return vectors when appropriate. The
`sparsevec` function returns a one-dimensional sparse vector instead of a
one-column sparse matrix.
* New `normalize` and `normalize!` convenience functions for normalizing
vectors ([#13681]).

* QR

* New method for generic QR with column pivoting ([#13480]).

* New method for polar decompositions of `AbstractVector`s ([#13681]).

* A new `SparseVector` type allows for one-dimensional sparse arrays.
Slicing and reshaping sparse matrices now return vectors when
appropriate. The `sparsevec` function returns a one-dimensional sparse
vector instead of a one-column sparse matrix. ([#13440])

Deprecated or removed
---------------------

* The method `A_ldiv_B!(SparseMatrixCSC, StrideVecOrMat)` has been deprecated in favor
of versions that require the matrix to in factored form ([#13496]).
* The method `A_ldiv_B!(SparseMatrixCSC, StrideVecOrMat)` has been deprecated
in favor of versions that require the matrix to be in factored form
([#13496]).

Julia v0.4.0 Release Notes
==========================
2 changes: 2 additions & 0 deletions base/exports.jl
@@ -676,6 +676,8 @@ export
lufact,
lyap,
norm,
normalize,
normalize!,
nullspace,
ordschur!,
ordschur,
2 changes: 2 additions & 0 deletions base/linalg.jl
@@ -95,6 +95,8 @@ export
lufact!,
lyap,
norm,
normalize,
normalize!,
nullspace,
ordschur!,
ordschur,
70 changes: 69 additions & 1 deletion base/linalg/generic.jl
@@ -8,7 +8,7 @@ scale(s::Number, X::AbstractArray) = s*X
# For better performance when input and output are the same array
# See https://github.com/JuliaLang/julia/issues/8415#issuecomment-56608729
function generic_scale!(X::AbstractArray, s::Number)
for I in eachindex(X)
@simd for I in eachindex(X)
@inbounds X[I] *= s
end
X
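The change in this hunk adds `@simd`, asserting that iterations of the scaling loop are independent so the compiler may vectorize the in-place multiply. A minimal sketch of reaching this loop through the exported `scale!`, assuming a non-BLAS element type so dispatch lands on the generic path:

```julia
X = collect(1:4)  # Int array, so the generic (non-BLAS) path is taken
scale!(X, 2)      # forwards to generic_scale!(X, 2) defined in this file;
                  # X is now [2, 4, 6, 8]
```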
@@ -529,3 +529,71 @@ function isapprox{T<:Number,S<:Number}(x::AbstractArray{T}, y::AbstractArray{S};
d = norm(x - y)
return isfinite(d) ? d <= atol + rtol*max(norm(x), norm(y)) : x == y
end

"""
normalize!(v, [p=2])
Normalize the vector `v` in-place with respect to the `p`-norm.
# Inputs
- `v::AbstractVector` - vector to be normalized
- `p::Real` - The `p`-norm to normalize with respect to. Default: 2
# Output
- `v` - A unit vector being the input vector, rescaled to have norm 1.
The input vector is modified in-place.
# See also
`normalize`, `qr`
"""
function normalize!(v::AbstractVector, p::Real=2)
nrm = norm(v, p)
__normalize!(v, nrm)
end

@inline function __normalize!{T<:AbstractFloat}(v::AbstractVector{T}, nrm::T)
#The largest positive floating point number whose inverse is less than
#infinity
const δ = inv(prevfloat(typemax(T)))

if nrm ≥ δ #Safe to multiply with inverse
invnrm = inv(nrm)
scale!(v, invnrm)

else #Divide by norm; slower but more correct
#Note 2015-10-19: As of Julia 0.4, @simd does not vectorize floating
#point division, although vectorized intrinsics like DIVPD exist. I
#will leave the @simd annotation in, hoping that someone will improve
#the @simd macro in the future. - cjh
@inbounds @simd for i in eachindex(v)
v[i] /= nrm
end
end

v
end
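The `nrm ≥ δ` branch above exists because `inv(nrm)` overflows to `Inf` for sufficiently small norms, which would turn every entry of the vector into `Inf` or `NaN`. An illustrative sketch of the failure mode the guard avoids (Float64 values; see also the overflow test added below):

```julia
δ = inv(prevfloat(typemax(Float64)))  # ≈ 5.56e-309, the cutoff used above
inv(δ)                                # ≈ 1.798e308, still finite
inv(δ/2)                              # == Inf — multiplying by this would
                                      # destroy the vector, so __normalize!
                                      # falls back to dividing by the norm
```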

"""
normalize(v, [p=2])
Normalize the vector `v` with respect to the `p`-norm.
# Inputs
- `v::AbstractVector` - vector to be normalized
- `p::Real` - The `p`-norm to normalize with respect to. Default: 2
# Output
- `v` - A unit vector being a copy of the input vector, scaled to have norm 1
# See also
`normalize!`, `qr`
"""
normalize(v::AbstractVector, p::Real=2) = v/norm(v, p)
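A brief sketch of the `p` argument, with values following directly from the corresponding norms:

```julia
v = [3.0, 4.0]
normalize(v)       # 2-norm (default): v / 5.0 == [0.6, 0.8]
normalize(v, 1)    # 1-norm:           v / 7.0 ≈ [0.4286, 0.5714]
normalize(v, Inf)  # ∞-norm:           v / 4.0 == [0.75, 1.0]
```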

43 changes: 43 additions & 0 deletions base/linalg/qr.jl
@@ -108,6 +108,49 @@ function _qr(A::Union{Number, AbstractMatrix}, ::Type{Val{true}}; thin::Bool=tru
full(getq(F), thin=thin), F[:R]::Matrix{eltype(F)}, F[:p]::Vector{BlasInt}
end

"""
qr(v::AbstractVector)
Computes the polar decomposition of a vector.
# Input
- `v::AbstractVector` - vector to normalize
# Outputs
- `w` - A unit vector in the direction of `v`
- `r` - The norm of `v`
# See also
`normalize`, `normalize!`, `qr!`
"""
function qr(v::AbstractVector)
nrm = norm(v)
v/nrm, nrm
end

"""
qr!(v::AbstractVector)
Computes the polar decomposition of a vector.
# Input
- `v::AbstractVector` - vector to normalize
# Outputs
- `w` - A unit vector in the direction of `v`
- `r` - The norm of `v`
# See also
`normalize`, `normalize!`, `qr`
"""
function qr!(v::AbstractVector)
nrm = norm(v)
__normalize!(v, nrm), nrm
end
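Putting the two methods together, a minimal usage sketch consistent with the docstrings above and the test added in test/linalg/qr.jl:

```julia
v = [3.0, 4.0]
w, r = qr(v)    # w == [0.6, 0.8], r == 5.0, and w*r reconstructs v

u = [3.0, 4.0]
w2, r2 = qr!(u) # in-place variant: u is mutated into the unit vector,
                # so w2 === u and u == [0.6, 0.8]
```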


convert{T}(::Type{QR{T}},A::QR) = QR(convert(AbstractMatrix{T}, A.factors), convert(Vector{T}, A.τ))
convert{T}(::Type{Factorization{T}}, A::QR) = convert(QR{T}, A)
convert{T}(::Type{QRCompactWY{T}},A::QRCompactWY) = QRCompactWY(convert(AbstractMatrix{T}, A.factors), convert(AbstractMatrix{T}, A.T))
21 changes: 21 additions & 0 deletions test/linalg/generic.jl
@@ -150,3 +150,24 @@ let
@test LinAlg.axpy!(α, x, deepcopy(y)) == x .* Matrix{Int}[α]
@test LinAlg.axpy!(α, x, deepcopy(y)) != Matrix{Int}[α] .* x
end

let
v = [3.0, 4.0]
@test norm(v) === 5.0
w = normalize(v)
@test w == [0.6, 0.8]
@test norm(w) === 1.0
@test norm(normalize!(v) - w, Inf) < eps()
end

#Test potential overflow in normalize!
let
δ = inv(prevfloat(typemax(Float64)))
v = [δ, -δ]

@test norm(v) === 7.866824069956793e-309
w = normalize(v)
@test w ≈ [1/√2, -1/√2]
@test norm(w) === 1.0
@test norm(normalize!(v) - w, Inf) < eps()
end
8 changes: 8 additions & 0 deletions test/linalg/qr.jl
@@ -164,3 +164,11 @@ let
Q=full(qrfact(A)[:Q])
@test vecnorm(A-Q) < eps()
end

let
debug && println("qr on AbstractVector")

v = [3.0, 4.0]
@test qr(v) == ([0.6, 0.8], 5.0)
end
