Commit 3822e6e

version 1.1-0

vigou3 authored and cran-robot committed Feb 23, 2010
1 parent 8addf93 commit 3822e6e
Showing 53 changed files with 401 additions and 211 deletions.
8 changes: 4 additions & 4 deletions DESCRIPTION
@@ -1,8 +1,8 @@
Package: actuar
Type: Package
Title: Actuarial functions
Version: 1.0-2
Date: 2009-05-14
Version: 1.1-0
Date: 2010-02-23
Author: Vincent Goulet, Sébastien Auclair, Christophe Dutang, Xavier
Milhaud, Tommy Ouellet, Louis-Philippe Pouliot, Mathieu Pigeon
Maintainer: Vincent Goulet <[email protected]>
@@ -17,6 +17,6 @@ Encoding: latin1
LazyLoad: yes
LazyData: yes
ZipData: yes
Packaged: Thu May 14 15:15:17 2009; vincent
Packaged: 2010-02-23 06:28:59 UTC; vincent
Repository: CRAN
Date/Publication: 2009-05-15 11:26:02
Date/Publication: 2010-02-23 07:49:36
4 changes: 3 additions & 1 deletion NAMESPACE
@@ -7,7 +7,7 @@ export(## Credibility theory
## Simulation of compound models
simul, simpf, severity, unroll,
## Risk theory
aggregateDist, CTE, discretize, discretise, VaR, adjCoef, ruin,
aggregateDist, CTE, TVaR, discretize, discretise, VaR, adjCoef, ruin,
## One parameter distributions
dinvexp, pinvexp, qinvexp, rinvexp, minvexp, levinvexp,
mexp, levexp, mgfexp,
@@ -59,6 +59,8 @@ S3method(aggregate, portfolio)

S3method(CTE, aggregateDist)

S3method(diff, aggregateDist)

S3method(elev, default)
S3method(elev, grouped.data)

5 changes: 3 additions & 2 deletions R/CTE.R
@@ -17,8 +17,7 @@ CTE.aggregateDist <- function(x, conf.level = c(0.9, 0.95, 0.99),
{
m <- get("mean", environment(x))
sd <- sqrt(get("variance", environment(x)))
res <- m + sd * exp(-(qnorm(conf.level))^2 / 2) /
((1 - conf.level) * sqrt(2 * pi))
res <- m + sd * dnorm(qnorm(conf.level)) / (1 - conf.level)
}
## Normal Power approximation; no explicit formula so revert to
## numerical integration.
@@ -60,3 +59,5 @@ CTE.aggregateDist <- function(x, conf.level = c(0.9, 0.95, 0.99),
}
res
}

TVaR <- CTE
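
The one-line simplification above uses the standard normal density identity dnorm(z) = exp(-z^2/2)/sqrt(2*pi), so the numerical results are unchanged. A quick sanity check in plain R (illustration only, not package code):

## Old and new expressions for the normal-approximation CTE agree.
p       <- c(0.9, 0.95, 0.99)
res.old <- exp(-(qnorm(p))^2 / 2) / ((1 - p) * sqrt(2 * pi))
res.new <- dnorm(qnorm(p)) / (1 - p)
all.equal(res.old, res.new)   # TRUE

The added line TVaR <- CTE simply binds a second name to the same function, so TVaR() accepts exactly the same arguments as CTE().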
19 changes: 17 additions & 2 deletions R/aggregateDist.R
@@ -10,7 +10,8 @@
aggregateDist <-
function(method = c("recursive", "convolution", "normal", "npower", "simulation"),
model.freq = NULL, model.sev = NULL, p0 = NULL, x.scale = 1,
moments, nb.simul, ..., tol = 1e-06, maxit = 500, echo = FALSE)
convolve = 0, moments, nb.simul, ...,
tol = 1e-06, maxit = 500, echo = FALSE)
{
Call <- match.call()

@@ -64,7 +65,7 @@ aggregateDist <-
c("poisson", "geometric", "negative binomial",
"binomial", "logarithmic"))
FUN <- panjer(fx = model.sev, dist = dist, p0 = p0,
x.scale = x.scale, ...,
x.scale = x.scale, ..., convolve = convolve,
tol = tol, maxit = maxit, echo = echo)
comment(FUN) <- "Recursive method approximation"
}
@@ -191,3 +192,17 @@ mean.aggregateDist <- function(x, ...)
drop(crossprod(get("x", envir = environment(x)),
get("fs", envir = environment(x))))
}

diff.aggregateDist <- function(x, ...)
{
label <- comment(x)

## The 'diff' method is defined for the recursive, exact and
## simulation methods only.
if (label == "Normal approximation" || label == "Normal Power approximation")
stop("function not defined for approximating distributions")

## The probability vector is already stored in the environment of
## the "aggregateDist" object.
get("fs", environment(x))
}
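
A minimal usage sketch combining the two additions above — the new 'convolve' argument passed through to panjer() and the new 'diff' method — written in the same form as the call shown in the risk vignette (the severity vector fx below is an arbitrary illustration, not the vignette's):

library(actuar)
fx <- c(0, 0.15, 0.2, 0.25, 0.125, 0.075,
        0.05, 0.05, 0.05, 0.025, 0.025)        # discrete severity pmf
## Recursive method; lambda is halved and the result convolved once
## with itself, as described in inst/NEWS.
Fs <- aggregateDist("recursive", model.freq = "poisson",
                    model.sev = fx, lambda = 5, convolve = 1,
                    x.scale = 0.5)
fs <- diff(Fs)        # new method: pmf at the knots of Fs
sum(fs)               # numerically close to 1
Fs(knots(Fs))[1:5]    # cdf at the first few knots; should match cumsum(fs)[1:5]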
6 changes: 6 additions & 0 deletions R/cm.R
@@ -150,10 +150,16 @@ cm <- function(formula, data, ratios, weights, subset,
res$ordering <- list(seq_along(levs))
}
else # hierarchical model
{
## Computations with auxiliary function.
res <- hierarc(ratios, weights, classification = ilevs,
method = method, tol = tol, maxit = maxit,
echo = echo)

## Put back original level names into the object
res$classification <- levs
}

## Transfer level names to lists
names(res$means) <- names(res$weights) <- c("portfolio", level.names)
names(res$unbiased) <- if (!is.null(res$unbiased)) names(res$means)
18 changes: 12 additions & 6 deletions R/panjer.R
@@ -7,12 +7,18 @@
### Sebastien Auclair, Louis-Philippe Pouliot and Tommy Ouellet

panjer <- function(fx, dist, p0 = NULL, x.scale = 1, ...,
tol = sqrt(.Machine$double.eps), maxit = 500,
echo = FALSE)
convolve = 0, tol = sqrt(.Machine$double.eps),
maxit = 500, echo = FALSE)
{
## Express 'tol' as a value close to 1.
tol <- 1 - tol

## Express 'tol' as a value close to 1. If needed, modify the
## accuracy level so that the user specified level is attained
## *after* the additional convolutions (without getting too high).
tol <- if (convolve > 0)
min((0.5 - tol + 0.5)^(0.5 ^ convolve),
0.5 - sqrt(.Machine$double.eps) + 0.5)
else
0.5 - tol + 0.5

## Check whether p0 is a valid probability or not.
if ( !is.null(p0) ) if ( (p0 < 0) | (p0 > 1) )
stop("'p0' must be a valid probability (between 0 and 1)")
@@ -105,7 +111,7 @@ panjer <- function(fx, dist, p0 = NULL, x.scale = 1, ...,
if (is.null(p0))
p1 = 0

fs <- .External("do_panjer", p0, p1, fs0, fx, a, b, tol, maxit, echo)
fs <- .External("do_panjer", p0, p1, fs0, fx, a, b, convolve, tol, maxit, echo)

FUN <- approxfun((0:(length(fs) - 1)) * x.scale, pmin(cumsum(fs), 1),
method = "constant", yleft = 0, yright = 1, f = 0,
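The reworked tolerance handling encodes the following reasoning: self-convolving a sub-distribution whose total mass is m doubles the number of aggregated copies each time, so after 'convolve' = n steps the mass is roughly m^(2^n), and the internal stopping level must therefore be raised to the (0.5^n)-th power before the recursion. A numerical sketch with made-up values (not package code):

eps <- sqrt(.Machine$double.eps)   # accuracy requested by the user
n   <- 2                           # number of self-convolutions
m   <- (1 - eps)^(0.5^n)           # mass to reach before convolving
m^(2^n)                            # ~ 1 - eps, the target afterwards
## The idiom 0.5 - tol + 0.5 in the source is just 1 - tol, written to
## preserve a little extra floating-point precision near 1, and the min()
## caps the internal level at 1 - sqrt(.Machine$double.eps).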
7 changes: 3 additions & 4 deletions R/quantile.aggregateDist.R
@@ -14,8 +14,7 @@ quantile.aggregateDist <-
## The Normal and Normal Power approximations are the only
## continuous distributions of class 'aggregateDist'. They are
## therefore treated differently, using the 'base' quantile
## function qnorm() or numerical optimization through the 'base'
## optimize function.
## function qnorm().
if (label == "Normal approximation")
res <- qnorm(probs, get("mean", environment(x)),
sqrt(get("variance", environment(x))))
@@ -43,8 +42,8 @@ quantile.aggregateDist <-
approxfun(y, x, yleft = 0, yright = max(x),
method = "linear", ties = "ordered")
else # cdf
fun <- approxfun(y, x, yleft = 0, yright = max(x),
method = "constant", ties = "ordered")
approxfun(y, x, yleft = 0, yright = max(x),
method = "constant", f = 1, ties = "ordered")

## Quantiles
res <- fun(probs)
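The f = 1 argument is what cures the "off by one knot" quantile bug reported in inst/NEWS: for a step cdf, the quantile at probability p is the smallest knot x with F(x) >= p, i.e. the right-hand value of the inverted step function. A self-contained toy illustration (not package data):

x <- c(0, 1, 2)          # knots of a toy step cdf
y <- c(0.2, 0.5, 1)      # F(0), F(1), F(2)

## Old construction (default f = 0): left-hand step value, one knot too low.
q.old <- approxfun(y, x, yleft = 0, yright = max(x),
                   method = "constant", f = 0, ties = "ordered")
## New construction (f = 1): smallest x with F(x) >= p.
q.new <- approxfun(y, x, yleft = 0, yright = max(x),
                   method = "constant", f = 1, ties = "ordered")

q.old(0.3)   # 0 -- off by one knot
q.new(0.3)   # 1 -- correct quantile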
30 changes: 30 additions & 0 deletions inst/NEWS
@@ -1,5 +1,35 @@
=== actuar: An R Package for Actuarial Science ===

Version 1.1-0
=============

NEW FEATURES

o New argument 'convolve' in aggregateDist() to convolve the
distribution obtained with the recursive method a number of times
with itself. This is used for large portfolios where the expected
number of claims is so large that recursions cannot start.
Dividing the frequency parameter by 2^n and convolving n times can
solve the problem.

o New method of 'diff' for "aggregateDist" objects to return the
probability mass function at the knots of the aggregate
distribution. Valid (and defined) for "recursive", "exact" and
"simulation" methods only.

o Since the terminology Tail Value-at-Risk is often used instead of
Conditional Tail Expectation, TVaR() is now an alias for CTE().

BUG FIXES

o Quantiles (and thus VaRs and CTEs) for "aggregateDist" objects
were off by one knot of the distribution.

o cm() returned the internal classification codes instead of the
original ones for hierarchical models. (Thanks to Zachary Martin
for the heads up.)


Version 1.0-2
=============

24 changes: 19 additions & 5 deletions inst/doc/actuar.bib
@@ -144,7 +144,9 @@ @Article{Goulet:simpf:2008
title = {Simulation of Compound Hierarchical Models in {R}},
journal = NAAJ,
year = 2008,
note = {To appear},
year = 2008,
volume = 12,
pages = {401-412},
language = {english}
}

@@ -236,6 +238,17 @@ @Book{LossModels2e
language = {english}
}

@Book{LossModels3e,
author = {Klugman, S. A. and Panjer, H. H. and Willmot, G.},
title = {Loss Models: From Data to Decisions},
edition = {Third},
publisher = {Wiley},
year = 2008,
address = {New York},
isbn = {978-0-4701878-1-4},
language = {english}
}

@Book{MART,
author = {Kaas, R. and Goovaerts, M. and Dhaene, J. and Denuit, M.},
title = {Modern actuarial risk theory},
@@ -326,10 +339,11 @@ @Article{actuar
}

@Article{cm,
author = {Goulet, V. and Ouellet, T.},
title = {On parameter estimation in hierarchical credibility},
author = {Belhadj, H. and Goulet, V. and Ouellet, T.},
title = {On Parameter Estimation in Hierarchical Credibility},
journal = AB,
year = 2008,
note = {Submitted for publication},
year = 2009,
volume = 39,
number = 2,
language = {english}
}
Binary file modified inst/doc/actuar.pdf
Binary file modified inst/doc/coverage.pdf
Binary file modified inst/doc/credibility.pdf
Binary file modified inst/doc/lossdist.pdf
61 changes: 47 additions & 14 deletions inst/doc/risk.Rnw
@@ -5,7 +5,7 @@
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage{lucidabr}
\usepackage{booktabs,tabularx,threeparttable}
\usepackage{booktabs}
\usepackage{paralist}
\usepackage[noae]{Sweave}

@@ -74,7 +74,7 @@ that the surplus becomes negative, in which case technical ruin of the
insurance company occurs.

The interested reader can read more on these subjects in
\cite{LossModels2e,Gerber_MRT,DenuitCharpentier1,MART}, among others.
\cite{LossModels3e,Gerber_MRT,DenuitCharpentier1,MART}, among others.

The current version of \pkg{actuar} \citep{actuar} contains four
visible functions related to the above problems: two for the
@@ -117,9 +117,11 @@ is the $n$-fold convolution of $F_C(\cdot)$. If $C$ is discrete on $0,
\begin{cases}
I\{x \geq 0\}, & k = 0 \\
F_C(x), & k = 1 \\
\sum_{y = 0}^x F_C^{*(k - 1)}(x - y) f_C(y), & k = 2, 3, \dots
\sum_{y = 0}^x F_C^{*(k - 1)}(x - y) f_C(y), & k = 2, 3, \dots,
\end{cases}
\end{equation}
where $I\{\mathcal{A}\} = 1$ if $\mathcal{A}$ is true and
$I\{\mathcal{A}\} = 0$ otherwise.



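The recursion above builds each k-fold convolution by convolving the (k-1)-fold convolution with $f_C$ once more; for a severity distribution discrete on 0, 1, 2, ..., this is an ordinary discrete convolution. A minimal R sketch (conv.pmf is an ad hoc helper written for this illustration, not an actuar function):

conv.pmf <- function(f, g)
{
    ## discrete convolution of two pmfs supported on 0, 1, 2, ...
    h <- numeric(length(f) + length(g) - 1)
    for (i in seq_along(f))
        h[i:(i + length(g) - 1)] <- h[i:(i + length(g) - 1)] + f[i] * g
    h
}
fC <- c(0.5, 0.3, 0.2)      # pmf of C on 0, 1, 2
f2 <- conv.pmf(fC, fC)      # pmf of C_1 + C_2 on 0, ..., 4
F2 <- cumsum(f2)            # F_C^{*(2)} at 0, 1, ..., 4
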
@@ -255,7 +257,7 @@ supported.
This requires the severity distribution to be discrete arithmetic on
$0, 1, 2, \dots, m$ for some monetary unit and the frequency
distribution to be a member of either the $(a, b, 0)$ or $(a, b, 1)$
family of distributions \citep{LossModels2e}. (These families
family of distributions \citep{LossModels3e}. (These families
contain the Poisson, binomial, negative binomial and logarithmic
distributions and their extensions with an arbitrary mass at $x =
0$.) The general recursive formula is:
@@ -307,10 +309,14 @@ supported.
approximation is valid for $x > \mu_S$ only and performs reasonably
well when $\gamma_S < 1$. See \cite{Daykin_et_al} for details.
\item Simulation of a random sample from $S$ and approximation of
$F_S(x)$ by the empirical cdf \eqref{eq:ecdf}. The simulation itself
is done with function \code{simul} (see the \code{"simulation"}
vignette)). This function admits very general hierarchical models
for both the frequency and the severity components.
$F_S(x)$ by the empirical cdf
\begin{equation}
F_n(x) = \frac{1}{n} \sum_{j = 1}^n I\{x_j \leq x\}.
\end{equation}
The simulation itself is done with function \code{simul} (see the
\code{"simulation"} vignette)). This function admits very general
hierarchical models for both the frequency and the severity
components.
\end{enumerate}

Here also, adding other methods to \code{aggregateDist} is simple due
@@ -323,6 +329,16 @@ the severity distribution. This way, one does not have to mentally do
the conversion between the support of $0, 1, 2, \dots$ assumed by the
recursive and convolution methods and the true support of $S$.

The recursive method fails when the expected number of claims is so
large that $f_S(0)$ is numerically equal to zero. One solution
proposed by \citet{LossModels3e} consists in dividing the appropriate
parameter of the frequency distribution by $2^n$, with $n$ such that
$f_S(0) > 0$ and the recursions can start. One then computes the
aggregate claim amount distribution using the recursive method and
then convolves the resulting distribution $n$ times with itself to
obtain the final distribution. Function \code{aggregateDist} supports
this procedure through its argument \code{convolve}.

The function returns an object of class \code{"aggregateDist"}
inheriting from the \code{"function"} class. Thus, one can use the
object as a function to compute the value of $F_S(x)$ in any $x$.
@@ -340,11 +356,18 @@ Fs <- aggregateDist("recursive", model.freq = "poisson",
model.sev = fx, lambda = 10, x.scale = 0.5)
summary(Fs) # summary method
@
Hence, object \code{Fs} contains an empirical cdf with support
Although unnecessary here, the following is essentially equivalent, except
in the far right tail for numerical reasons:
<<echo=TRUE>>=
knots(Fs) # support of Fs.b (knots)
Fsc <- aggregateDist("recursive", model.freq = "poisson",
model.sev = fx, lambda = 5, convolve = 1, x.scale = 0.5)
summary(Fsc) # summary method
@

We return to object \code{Fs}. It contains an empirical cdf with support
<<echo=TRUE>>=
knots(Fs) # support of Fs.b (knots)
@
A nice graph of this function is obtained with a method of \code{plot} (see
Figure \ref{fig:Fs}):
<<echo=TRUE, eval=FALSE>>=
@@ -370,16 +393,26 @@ quantile(Fs) # quantiles
quantile(Fs, 0.999) # quantiles
@

Second, the package introduces the generic functions \code{VaR} and
\code{CTE} with methods for objects of class \code{"aggregateDist"}.
The former computes the value-at-risk $\VaR_\alpha$ such that
Second, a method of \code{diff} gives easy access to the underlying
probability mass function:
<<echo=TRUE>>=
diff(Fs)
@
Of course, this is defined (and makes sense) for the recursive, direct
convolution and simulation methods only.

Third, the package introduces the generic functions \code{VaR} and
\code{CTE} (with alias \code{TVaR}) with methods for objects of class
\code{"aggregateDist"}. The former computes the value-at-risk
$\VaR_\alpha$ such that
\begin{equation}
\label{eq:VaR}
\Pr[S \leq \VaR_\alpha] = \alpha,
\end{equation}
where $\alpha$ is the confidence level. Thus, the value-at-risk is
nothing else than a quantile. As for the method of \code{CTE}, it
computes the conditional tail expectation
computes the conditional tail expectation (also called Tail
Value-at-Risk)
\begin{equation}
\label{eq:CTE}
\CTE_\alpha = \E{S|S > \VaR_\alpha}.
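Putting the risk measures together, a short usage sketch with the Fs object built earlier in the vignette (output omitted; the methods' default confidence levels are assumed):

VaR(Fs)                        # value-at-risk, i.e. quantiles of S
CTE(Fs)                        # conditional tail expectation
TVaR(Fs)                       # identical result: TVaR() is an alias for CTE()
CTE(Fs, conf.level = 0.995)    # a single, non-default level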
Binary file modified inst/doc/risk.pdf
Binary file modified inst/doc/simulation.pdf
Binary file modified inst/po/fr/LC_MESSAGES/R-actuar.mo
6 changes: 3 additions & 3 deletions man/BetaMoments.Rd
@@ -32,12 +32,12 @@ levbeta(limit, shape1, shape2, order = 1)
warning.
}
\seealso{
\code{\link{Beta}} for details on the Beta distribution and
\code{\link[stats]{Beta}} for details on the Beta distribution and
functions \code{{d,p,q,r}beta}.
}
\references{
Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2004),
\emph{Loss Models, From Data to Decisions, Second Edition}, Wiley.
Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2008),
\emph{Loss Models, From Data to Decisions, Third Edition}, Wiley.
}
\author{
Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and