# Path: blob/master/sandbox/FFEV/HermiteGrid_CVaR_Recursion.R
# (1433 views)
# This script illustrates the discrete Newton recursion to process views on CVaR
# according to Entropy Pooling.
# This script complements the article
#   "Fully Flexible Extreme Views"
#   by A. Meucci, D. Ardia, S. Keel
#   available at www.ssrn.com
# The most recent version of this code is available at
# MATLAB Central - File Exchange
#
# Requires names defined elsewhere in the package:
#   ghqx                  - Gauss-Hermite quadrature abscissas (column vector/matrix)
#   integrateSubIntervals - probabilities of the grid sub-intervals under a cdf
#   normalizeProb         - renormalizes a probability vector to sum to 1
#   EntropyProg           - Entropy Pooling optimizer; returns $p_ (posterior
#                           probabilities) and $optimizationPerformance$ml
#                           (relative entropy / KL divergence at the optimum)

###################################################################################
# Prior market model (standard normal) discretized on a Gauss-Hermite grid
market.mu   = 0.0
market.sig2 = 1.0
market.pdf = function(x) dnorm( x, mean = market.mu, sd = sqrt(market.sig2) )
market.cdf = function(x) pnorm( x, mean = market.mu, sd = sqrt(market.sig2) )
market.rnd = function(x) rnorm( x, mean = market.mu, sd = sqrt(market.sig2) )
market.inv = function(x) qnorm( x, mean = market.mu, sd = sqrt(market.sig2) )
market.VaR95 = market.inv( 0.05 )
# CVaR95 = E[ X | X <= VaR95 ] = (1/0.05) * integral_{-inf}^{VaR95} x f(x) dx;
# -100 is numerically -Inf for a standard normal. Use the full component name
# $value instead of relying on R's partial matching of $val.
market.CVaR95 = integrate( function( x ) ( x * market.pdf( x ) ),
                           -100, market.VaR95 )$value / 0.05

tmp = ( ghqx - min( ghqx ) ) / ( max( ghqx ) - min( ghqx ) ) # rescale GH zeros so they belong to [0,1]
epsilon = 1e-10
Lower = market.inv( epsilon )
Upper = market.inv( 1 - epsilon )
X = Lower + tmp * ( Upper - Lower ) # rescale mesh

p = integrateSubIntervals( X, market.cdf )
p = normalizeProb( p )
J = nrow( X )

###################################################################################
# Entropy posterior from extreme view on CVaR: brute-force approach

# view of the analyst
view.CVaR95 = -3.0

# Iterate over different VaR95 levels
nVaR95 = 100
VaR95  = seq( view.CVaR95, market.VaR95, len = nVaR95 )
p_     = matrix( NaN, nrow = J, ncol = nVaR95 )
s_     = matrix( NaN, nrow = nVaR95, ncol = 1 )
KLdiv  = matrix( NaN, nrow = nVaR95, ncol = 1 )

for ( i in 1:nVaR95 ) {
  idx = as.matrix( X <= VaR95[i] )  # indicator of the tail at this candidate VaR level
  s_[i] = sum( idx )                # number of grid points in the tail
  # Constraints: tail probability <= 5%, and full normalization plus
  # tail expectation = 5% * CVaR view (equalities)
  posteriorEntropy = EntropyProg( p, t( idx ), as.matrix( 0.05 ),
                                  rbind( rep( 1, J ), t( idx * X ) ),
                                  rbind( 1, 0.05 * view.CVaR95 ) )
  p_[ , i ]  = posteriorEntropy$p_
  KLdiv[ i ] = posteriorEntropy$optimizationPerformance$ml
}

# Display results: relative entropy as a function of tail size, and its minimizer.
# NOTE: the original MATLAB code relied on "hold on" to overlay plots; in R a
# second plot() erases the figure, so overlays use points()/lines() instead.
plot( s_, KLdiv )
idxMin = which.min( KLdiv )
points( s_[ idxMin ], KLdiv[ idxMin ], pch = 19 )

# best brute-force posterior vs. renormalized prior density, with both CVaR markers
tmp = p_[ , idxMin ]
tmp = tmp / sum( tmp )
plot( X, tmp )
x = seq( min( X ), max( X ), len = J )
prior = market.pdf( x )
prior = prior / sum( prior )
lines( x, prior )
points( market.CVaR95, 0 )
points( view.CVaR95, 0 )

###################################################################################
# Entropy posterior from extreme view on CVaR: Newton-Raphson approach

# s[i] holds the integer tail size at iteration i.
# NOTE(review): the original initialized s as a 0x0 matrix; `s[1] <- ...` on a
# matrix with index past its length is a subscript-out-of-bounds error in R,
# so start from an empty vector (rbind() below still works on it).
s = c()

# initial value: the tail that carries 5% of the prior mass
idx = as.matrix( cumsum( p ) <= 0.05 )
s[1] = sum( idx )
posteriorEntropy = EntropyProg( p, t( idx ), as.matrix( 0.05 ),
                                rbind( rep( 1, J ), t( idx * X ) ),
                                rbind( 1, 0.05 * view.CVaR95 ) )
KLdiv = as.matrix( posteriorEntropy$optimizationPerformance$ml )
p_ = posteriorEntropy$p_

# iterate the discrete Newton recursion on the tail size s
doStop = 0
i = 1
while ( !doStop ) {
  i = i + 1

  # evaluate the relative entropy at tail sizes s, s+1, s+2
  # to build first and second finite differences
  idx = cbind( matrix( 1, 1, s[i - 1] ), matrix( 0, 1, J - s[i - 1] ) )
  posteriorEntropy1 = EntropyProg( p, idx, as.matrix( 0.05 ),
                                   rbind( matrix( 1, 1, J ), t( t( idx ) * X ) ),
                                   rbind( 1, 0.05 * view.CVaR95 ) )

  idx = cbind( matrix( 1, 1, s[i - 1] + 1 ), matrix( 0, 1, J - s[i - 1] - 1 ) )
  posteriorEntropy2 = EntropyProg( p, idx, as.matrix( 0.05 ),
                                   rbind( matrix( 1, 1, J ), t( t( idx ) * X ) ),
                                   rbind( 1, 0.05 * view.CVaR95 ) )

  idx = cbind( matrix( 1, 1, s[i - 1] + 2 ), matrix( 0, 1, J - s[i - 1] - 2 ) )
  posteriorEntropy3 = EntropyProg( p, idx, as.matrix( 0.05 ),
                                   rbind( matrix( 1, 1, J ), t( t( idx ) * X ) ),
                                   rbind( 1, 0.05 * view.CVaR95 ) )

  # first difference
  DE = posteriorEntropy2$optimizationPerformance$ml -
       posteriorEntropy1$optimizationPerformance$ml

  # second difference
  D2E = posteriorEntropy3$optimizationPerformance$ml -
        2 * posteriorEntropy2$optimizationPerformance$ml +
        posteriorEntropy1$optimizationPerformance$ml

  # discrete Newton step: next (rounded) tail size
  s = rbind( s, round( s[i - 1] - ( DE / D2E ) ) )

  # re-optimize at the new tail size and record the posterior and its entropy
  idx = cbind( matrix( 1, 1, s[i] ), matrix( 0, 1, J - s[i] ) )
  tempEntropy = EntropyProg( p, idx, as.matrix( 0.05 ),
                             rbind( matrix( 1, 1, J ), t( t( idx ) * X ) ),
                             rbind( 1, 0.05 * view.CVaR95 ) )
  p_ = cbind( p_, tempEntropy$p_ )
  KLdiv = rbind( KLdiv, tempEntropy$optimizationPerformance$ml )

  # if the relative change in KLdiv is less than one percent, stop
  if ( abs( ( KLdiv[i] - KLdiv[i - 1] ) / KLdiv[i - 1] ) < 0.01 ) { doStop = 1 }
}

###################################################################################
# Display results

N = length( s )

# convergence of the Newton recursion
plot( 1:N, KLdiv )

# prior density vs. final posterior, with both CVaR markers overlaid
x = seq( min( X ), max( X ), len = J )
prior = market.pdf( x )
prior = prior / sum( prior )
plot( t( X ), prior )
lines( t( X ), p_[ , ncol( p_ ) ] )
points( market.CVaR95, 0.0 )
points( view.CVaR95, 0.0 )

# zoom here: prior vs. first, second and final posterior
plot( t( X ), prior )
lines( t( X ), p_[ , 1 ] )
lines( t( X ), p_[ , 2 ] )
lines( t( X ), p_[ , ncol( p_ ) ] )
points( market.CVaR95, 0 )
points( view.CVaR95, 0 )