sapleES2

Algorithm in R to compute the BEST possible ES for the sum of d random losses identically distributed as F, as described in Table 1 of

Puccetti, G. (2013). Sharp bounds on the expected shortfall for a sum of dependent random variables. Statistics & Probability Letters, 83(4), 1227-1232.

The code has been improved using the discretization technique described in Section 2.2.1 of

Jakobsons, E., Vanduffel, S. (2015). Dependence Uncertainty Bounds for the Expectile of a Portfolio. Risks 3, 599-623.
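
For orientation, the quantity approximated below is the sharp lower ("best possible") bound on the ES of the portfolio sum over all dependence structures compatible with the fixed marginal F; the display below is a paraphrase in standard notation, not a formula quoted from either paper:

\[ \underline{\mathrm{ES}}_\alpha \;=\; \inf\big\{\, \mathrm{ES}_\alpha(X_1+\cdots+X_d) \;:\; X_j \sim F,\ j=1,\dots,d \,\big\} \]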

# STEP 1.  CREATION OF THE MATRIX X

# epsilon = tolerance for stopping condition

epsilon=0.0001

# alpha = confidence level at which ES is computed

alpha=0.99

# N = cardinality of the support of the discrete approximation of EACH marginal distribution (the larger N, the more accurate but also the more time-consuming the algorithm)

N=10^5

# d = dimensionality of the risk portfolio 

d=8

distribution=switch(1,'Pareto','lognormal','exponential') # choose distribution

theta=2 # Pareto parameter

lnmu=6.474105; lnsd=0.7213475 # lognormal parameters

lambda=1/3 # exponential rate

# Q(p) = quantile function of the marginal distribution F (this can be changed according to the end user's needs). 

# TI(p) = integral of Q from p to 1

Q=function(p){switch(distribution,

                     Pareto=(1-p)^(-1/theta)-1,

                     lognormal=qlnorm(p, meanlog = lnmu, sdlog = lnsd, lower.tail = TRUE, log.p = FALSE),

                     exponential=-log(1-p)/lambda)}

TI=function(p){switch(distribution,

                      Pareto=theta/(theta-1)*(1-p)^(1-1/theta)-(1-p),

                      lognormal=exp(lnmu+lnsd^2/2)*(1-pnorm(qnorm(p)-lnsd)),

                      exponential=(1-p)*(1-log(1-p))/lambda)}
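
# Optional consistency check (an addition, not part of the original script): TI(p) should
# agree with a numerical integration of Q over (p,1); base R's integrate() suffices here.

p0=0.9

abs(TI(p0)-integrate(Q,lower=p0,upper=1)$value) # should be close to 0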

# midpoint discretization

discretization=Q((1:N-0.5)/N)

# replace endpoints by tail expectations

discretization[N]=N*TI(1-1/N)

discretization[1]=N*(TI(0)-TI(1/N))
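
# Optional sanity check (an addition, not in the original code): after the endpoint
# replacement, the discretized mean should reproduce the true mean TI(0), up to the
# midpoint-rule error of the interior cells.

abs(mean(discretization)-TI(0)) # should be small for large N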

# X=auxiliary matrix

X=matrix(rep(discretization,d),nrow=N)

# STEP 2-3.  ITERATIVE REARRANGEMENT

# ES = current estimate of the best possible ES (updated at each iteration)

ES=Inf

# delta = difference between two consecutive estimates 

delta=Inf

# iteratively rearrange each column of the matrix until the difference between

# two consecutive estimates of the best ES is less than epsilon

while(delta > epsilon){

# EStemp = ES estimate from the previous iteration (used to measure convergence)

EStemp=ES

for(j in 1:d) {

# rearrangement procedure: oppositely order column j with respect to the sum of the other columns (a standalone toy illustration follows the OUTPUT step)

X[,j]=sort(X[,j],decreasing=TRUE)[rank(rowSums(X[,c(1:d)[-j]]),ties.method='first')] }

# computation of the best ES in one iteration

# Y = vector of the row-wise sums of X, sorted in increasing order

Y=sort(rowSums(X))

ES=sum(Y[(floor(N*alpha)+1):N])/N/(1-alpha)

delta=abs(ES-EStemp) 

}

# OUTPUT

# Output=best possible ES at the given tolerance level

print(ES)
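
# Standalone toy illustration of the rearrangement step of STEP 2-3 (this block is an
# addition, independent of the algorithm above, and only shows the mechanics): each
# column is oppositely ordered to the sum of the remaining columns, which flattens the
# row sums.

Xtoy=matrix(c(1,2,3,1,2,3),nrow=3) # two identical columns with values 1,2,3

j=1

Xtoy[,j]=sort(Xtoy[,j],decreasing=TRUE)[rank(rowSums(Xtoy[,-j,drop=FALSE]),ties.method='first')]

rowSums(Xtoy) # all row sums now equal 4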

# SANITY CHECK AND ACCURACY OF THE RA

# for comparison, also compute the bounds from Bernard, C., X. Jiang, and R. Wang (2014).

# Risk aggregation with dependence uncertainty. Insurance: Mathematics and Economics 54, 93–108.

# these are valid for distributions with a decreasing density (Pareto, exponential), but not necessarily for the lognormal

# Cd = root, on (0,1/d), of the equation that determines which of the two analytical formulas below applies

Cd=uniroot(function(x){(1-d*x)*((d-1)*Q((d-1)*x)+Q(1-x))-d*(TI((d-1)*x)-TI(1-x))},c(epsilon,1/d-epsilon),tol=1e-6)$root

if((1-alpha)/d<Cd){

  ESbjw=d*(TI(1-(1-alpha)/d)+TI(0)-TI((d-1)*(1-alpha)/d))/(1-alpha)

} else {

  ESbjw=(d*TI(0)-alpha*((d-1)*Q((d-1)*Cd)+Q(1-Cd)))/(1-alpha)

}

print(ESbjw)

if(ES<ESbjw)

{

  cat('RA underestimates by ',round(1000*(1-ES/ESbjw))/10,'%\n')

} else {

  cat('RA overestimates by ',round(1000*(ES/ESbjw-1))/10,'%\n')

}
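
# Additional reference point (an addition, not in the original script): ES is comonotone
# additive, so the comonotonic (upper) bound for d identical marginals is d times the
# marginal ES, i.e. d*TI(alpha)/(1-alpha); the best possible ES above should not exceed it.

EScomon=d*TI(alpha)/(1-alpha)

cat('comonotonic ES (upper bound):',EScomon,'\n')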