# Source: sandbox/paper_analysis/R_interpretation/Sharpe_functions_Wolf.R
# (scraped page metadata: 1433 views)
# Sharpe-ratio inference functions, following Ledoit & Wolf (2008),
# "Robust performance hypothesis testing with the Sharpe ratio", and the
# HAC machinery of Andrews (1991) / Andrews & Monahan (1992).
#
# NOTE(review): cbb.sequence() (circular-block bootstrap indices) and
# sb.sequence() (stationary bootstrap indices) are referenced below but not
# defined in this file — confirm they are sourced before use.

# Difference of Sharpe ratios of two portfolios net of transaction costs.
#
#   pr1, pr2:                     per-period returns of the two portfolios
#   rebalweights1, rebalweights2: per-period rebalancing turnover; only
#                                 element [t] is used each period
#   C:        proportional transaction cost rate
#   initialV: initial investment
#
# Returns c(HAC Sharpe-ratio difference, HAC p-value).
diffSharpeRatio <- function(pr1, pr2, rebalweights1, rebalweights2,
                            C = 0.0001, initialV = 1000) {
  # TODO(review): cost should arguably be charged per asset on the change in
  # weights, not at the portfolio level (comment carried over from original).
  n <- length(pr1)
  vwealth1 <- numeric(n + 1)
  vwealth2 <- numeric(n + 1)
  vwealth1[1] <- vwealth2[1] <- initialV * (1 - C)
  for (t in seq_len(n)) {
    vwealth1[t + 1] <- vwealth1[t] * (1 + pr1[t]) *
      (1 - C * sum(abs(rebalweights1[t])))
    vwealth2[t + 1] <- vwealth2[t] * (1 + pr2[t]) *
      (1 - C * sum(abs(rebalweights2[t])))
  }
  # Net simple returns along each wealth path.
  vret1 <- diff(vwealth1) / vwealth1[-length(vwealth1)]
  # BUG FIX: original indexed the vret2 denominator with length(vwealth1)
  # (copy-paste slip); harmless when both series have equal length, wrong
  # in general.
  vret2 <- diff(vwealth2) / vwealth2[-length(vwealth2)]
  # out <- boot.time.inference(ret = cbind(vret1_mon, vret2_mon), b = 4, M = 1000)
  out <- hac.inference(ret = cbind(vret1, vret2))
  c(out$Difference, out$p.Values[1])
}


# Circular-block bootstrap test of H0: SR1 - SR2 = Delta.null
# (Ledoit & Wolf 2008, Section 3).
#
#   ret:        T x 2 matrix/data.frame of returns
#   b:          block size
#   M:          number of bootstrap replications
#   Delta.null: hypothesized Sharpe-ratio difference
#   digits:     rounding applied to the reported results
#
# Returns list(Difference, p.Value).
boot.time.inference <- function(ret, b, M, Delta.null = 0, digits = 4) {
  T <- length(ret[, 1])
  l <- floor(T / b)                    # number of full blocks
  Delta.hat <- sharpe.ratio.diff(ret)
  d <- abs(Delta.hat - Delta.null) / compute.se.Parzen.pw(ret)
  p.value <- 1                         # +1/(M+1) finite-sample correction
  for (m in seq_len(M)) {
    # cbb.sequence() draws circular-block bootstrap indices (defined elsewhere).
    ret.star <- ret[cbb.sequence(T, b), ]
    Delta.hat.star <- sharpe.ratio.diff(ret.star)
    ret1.star <- ret.star[, 1]
    ret2.star <- ret.star[, 2]
    mu1.hat.star <- mean(ret1.star)
    mu2.hat.star <- mean(ret2.star)
    gamma1.hat.star <- mean(ret1.star^2)
    gamma2.hat.star <- mean(ret2.star^2)
    # Delta-method gradient of (mu1, mu2, gamma1, gamma2) -> SR1 - SR2.
    gradient <- rep(0, 4)
    gradient[1] <- gamma1.hat.star / (gamma1.hat.star - mu1.hat.star^2)^1.5
    gradient[2] <- -gamma2.hat.star / (gamma2.hat.star - mu2.hat.star^2)^1.5
    gradient[3] <- -0.5 * mu1.hat.star / (gamma1.hat.star - mu1.hat.star^2)^1.5
    gradient[4] <- 0.5 * mu2.hat.star / (gamma2.hat.star - mu2.hat.star^2)^1.5
    y.star <- data.frame(ret1.star - mu1.hat.star,
                         ret2.star - mu2.hat.star,
                         ret1.star^2 - gamma1.hat.star,
                         ret2.star^2 - gamma2.hat.star)
    Psi.hat.star <- matrix(0, 4, 4)
    for (j in seq_len(l)) {
      # BUG FIX: mean() on a data.frame no longer returns column means
      # (mean.data.frame was removed in R 3.0); colMeans() restores the
      # intended per-column block means.
      zeta.star <- b^0.5 * colMeans(y.star[((j - 1) * b + 1):(j * b), ])
      Psi.hat.star <- Psi.hat.star + zeta.star %*% t(zeta.star)
    }
    Psi.hat.star <- Psi.hat.star / l
    se.star <- as.numeric(sqrt(t(gradient) %*% Psi.hat.star %*% gradient / T))
    d.star <- abs(Delta.hat.star - Delta.hat) / se.star
    if (d.star >= d) {
      p.value <- p.value + 1
    }
  }
  p.value <- p.value / (M + 1)
  list(Difference = round(Delta.hat, digits),
       p.Value = round(p.value, digits))
}


# Difference of sample Sharpe ratios: SR(ret[,1]) - SR(ret[,2]).
sharpe.ratio.diff <- function(ret) {
  ret1 <- ret[, 1]
  ret2 <- ret[, 2]
  mean(ret1) / sd(ret1) - mean(ret2) / sd(ret2)
}


# HAC-based test of equality of two Sharpe ratios (Ledoit & Wolf 2008, Sec. 2).
#
# Returns list(Sharpe.Ratios, Difference, Standard.Errors, p.Values) with both
# the plain and the prewhitened Parzen HAC standard errors and the matching
# two-sided normal p-values.
hac.inference <- function(ret, digits = 3) {
  ret1 <- ret[, 1]
  ret2 <- ret[, 2]
  SR1.hat <- mean(ret1) / sd(ret1)
  SR2.hat <- mean(ret2) / sd(ret2)
  SRs <- round(c(SR1.hat, SR2.hat), digits)
  names(SRs) <- c("SR1.hat", "SR2.hat")
  diff <- SR1.hat - SR2.hat
  se <- compute.se.Parzen(ret)
  se.pw <- compute.se.Parzen.pw(ret)
  SEs <- round(c(se, se.pw), digits)
  names(SEs) <- c("HAC", "HAC.pw")
  PVs <- round(2 * pnorm(-abs(diff) / c(se, se.pw)), digits)
  names(PVs) <- c("HAC", "HAC.pw")
  list(Sharpe.Ratios = SRs, Difference = round(diff, digits),
       Standard.Errors = SEs, p.Values = PVs)
}


# Parzen-kernel HAC standard error of SR1 - SR2 with automatic bandwidth
# choice (Andrews 1991), via the delta method on (mu1, mu2, gamma1, gamma2).
compute.se.Parzen <- function(ret) {
  ret1 <- ret[, 1]
  ret2 <- ret[, 2]
  T <- length(ret1)
  mu1.hat <- mean(ret1)
  mu2.hat <- mean(ret2)
  gamma1.hat <- mean(ret1^2)
  gamma2.hat <- mean(ret2^2)
  # Delta-method gradient of (mu1, mu2, gamma1, gamma2) -> SR1 - SR2.
  gradient <- rep(0, 4)
  gradient[1] <- gamma1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[2] <- -gamma2.hat / (gamma2.hat - mu2.hat^2)^1.5
  gradient[3] <- -0.5 * mu1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[4] <- 0.5 * mu2.hat / (gamma2.hat - mu2.hat^2)^1.5
  V.hat <- compute.V.hat(ret)
  Psi.hat <- compute.Psi.hat(V.hat)
  as.numeric(sqrt(t(gradient) %*% Psi.hat %*% gradient / T))
}


# Calibrate the block size b for boot.time.inference() by simulating from a
# fitted VAR(1) and picking the b whose empirical rejection probability is
# closest to the nominal level alpha (Ledoit & Wolf 2008, Sec. 3.2).
#
#   b.vec:   candidate block sizes
#   alpha:   nominal level
#   M:       bootstrap replications per calibration sample
#   K:       number of simulated calibration samples
#   b.av:    block size for the bootstrap of the VAR residuals
#   T.start: burn-in length for the simulated series
block.size.calibrate <- function(ret, b.vec = c(1, 3, 6, 10), alpha = 0.05,
                                 M = 199, K = 1000, b.av = 5, T.start = 50) {
  b.len <- length(b.vec)
  emp.reject.probs <- rep(0, b.len)
  Delta.hat <- sharpe.ratio.diff(ret)
  ret1 <- ret[, 1]
  ret2 <- ret[, 2]
  T <- length(ret1)
  Var.data <- matrix(0, T.start + T, 2)
  # as.numeric() guards against ret being a data.frame (a row would
  # otherwise refuse to drop into the numeric matrix).
  Var.data[1, ] <- as.numeric(ret[1, ])
  # Fit a bivariate VAR(1) equation by equation.
  fit1 <- lm(ret1[2:T] ~ ret1[1:(T - 1)] + ret2[1:(T - 1)])
  fit2 <- lm(ret2[2:T] ~ ret1[1:(T - 1)] + ret2[1:(T - 1)])
  coef1 <- as.numeric(fit1$coef)
  coef2 <- as.numeric(fit2$coef)
  resid.mat <- cbind(as.numeric(fit1$resid), as.numeric(fit2$resid))
  for (k in seq_len(K)) {
    # sb.sequence() draws stationary-bootstrap indices (defined elsewhere).
    resid.mat.star <- rbind(c(0, 0),
                            resid.mat[sb.sequence(T - 1, b.av,
                                                  T.start + T - 1), ])
    # BUG FIX: removed a stray debug print(resid.mat.star) that dumped a
    # full residual matrix to the console on every one of the K iterations.
    for (t in 2:(T.start + T)) {
      Var.data[t, 1] <- coef1[1] + coef1[2] * Var.data[t - 1, 1] +
        coef1[3] * Var.data[t - 1, 2] + resid.mat.star[t, 1]
      Var.data[t, 2] <- coef2[1] + coef2[2] * Var.data[t - 1, 1] +
        coef2[3] * Var.data[t - 1, 2] + resid.mat.star[t, 2]
    }
    # Drop the burn-in period before testing.
    Var.data.trunc <- Var.data[(T.start + 1):(T.start + T), ]
    for (j in seq_len(b.len)) {
      p.Value <- boot.time.inference(Var.data.trunc, b.vec[j], M,
                                     Delta.hat)$p.Value
      if (p.Value <= alpha) {
        emp.reject.probs[j] <- emp.reject.probs[j] + 1
      }
    }
  }
  emp.reject.probs <- emp.reject.probs / K
  # Pick the b whose empirical rejection probability is closest to alpha.
  b.order <- order(abs(emp.reject.probs - alpha))
  b.opt <- b.vec[b.order[1]]
  b.vec.with.probs <- rbind(b.vec, emp.reject.probs)
  colnames(b.vec.with.probs) <- rep("", length(b.vec))
  list(Empirical.Rejection.Probs = b.vec.with.probs, b.optimal = b.opt)
}


# Demeaned moment contributions — what Andrews (1991) calls V.hat =
# V(theta.hat) in our context. Columns: ret1 - mu1, ret2 - mu2,
# ret1^2 - gamma1, ret2^2 - gamma2.
compute.V.hat <- function(ret) {
  ret1 <- ret[, 1]
  ret2 <- ret[, 2]
  cbind(ret1 - mean(ret1), ret2 - mean(ret2),
        ret1^2 - mean(ret1^2), ret2^2 - mean(ret2^2))
}


# Kernel HAC long-run covariance estimator; first part of (2.5) of
# Andrews (1991) (what he calls J.hat there we call Psi.hat).
compute.Psi.hat <- function(V.hat) {
  T <- length(V.hat[, 1])
  alpha.hat <- compute.alpha.hat(V.hat)
  # Automatic bandwidth for the Parzen kernel (Andrews 1991, eq. 6.2).
  S.star <- 2.6614 * (alpha.hat * T)^0.2
  Psi.hat <- compute.Gamma.hat(V.hat, 0)
  j <- 1
  while (j < S.star) {
    Gamma.hat <- compute.Gamma.hat(V.hat, j)
    Psi.hat <- Psi.hat + kernel.Parzen(j / S.star) * (Gamma.hat + t(Gamma.hat))
    j <- j + 1
  }
  # Degrees-of-freedom correction for the 4 estimated moments.
  (T / (T - 4)) * Psi.hat
}


# Automatic-bandwidth parameter alpha(2); see (6.4) of Andrews (1991).
# An AR(1) is fitted to each column of V.hat by OLS.
compute.alpha.hat <- function(V.hat) {
  p <- ncol(V.hat)
  numerator <- 0
  denominator <- 0
  for (i in seq_len(p)) {
    # Positional args: aic = FALSE (0), order.max = 1, i.e. a fixed AR(1).
    fit <- ar(V.hat[, i], 0, 1, method = "ols")
    rho.hat <- as.numeric(fit$ar)             # original used fit[2]
    sig.hat <- sqrt(as.numeric(fit$var.pred)) # original used fit[3]
    numerator <- numerator + 4 * rho.hat^2 * sig.hat^4 / (1 - rho.hat)^8
    denominator <- denominator + sig.hat^4 / (1 - rho.hat)^4
  }
  numerator / denominator
}


# Sample autocovariance matrix at lag j; second part of (2.5) of
# Andrews (1991). Requires j < nrow(V.hat).
compute.Gamma.hat <- function(V.hat, j) {
  T <- nrow(V.hat)
  p <- ncol(V.hat)
  if (j >= T)
    stop("j must be smaller than the row dimension!")
  Gamma.hat <- matrix(0, p, p)
  for (i in (j + 1):T)
    Gamma.hat <- Gamma.hat + V.hat[i, ] %*% t(V.hat[i - j, ])
  Gamma.hat / T
}


# Parzen kernel weight: piecewise cubic with support [-1, 1].
kernel.Parzen <- function(x) {
  ax <- abs(x)
  if (ax <= 0.5) {
    1 - 6 * x^2 + 6 * ax^3
  } else if (ax <= 1) {
    2 * (1 - ax)^3
  } else {
    0
  }
}


# Prewhitened Parzen-kernel HAC standard error of SR1 - SR2,
# Andrews & Monahan (1992).
compute.se.Parzen.pw <- function(ret) {
  ret1 <- ret[, 1]
  ret2 <- ret[, 2]
  mu1.hat <- mean(ret1)
  mu2.hat <- mean(ret2)
  gamma1.hat <- mean(ret1^2)
  gamma2.hat <- mean(ret2^2)
  # Delta-method gradient of (mu1, mu2, gamma1, gamma2) -> SR1 - SR2.
  gradient <- rep(0, 4)
  gradient[1] <- gamma1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[2] <- -gamma2.hat / (gamma2.hat - mu2.hat^2)^1.5
  gradient[3] <- -0.5 * mu1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[4] <- 0.5 * mu2.hat / (gamma2.hat - mu2.hat^2)^1.5
  T <- length(ret1)
  V.hat <- compute.V.hat(ret)
  # VAR(1) prewhitening: regress V.hat[t] on V.hat[t-1] (no intercept).
  # (Original wrote 1:T-1, i.e. 0:(T-1) with the 0 index silently dropped;
  # 1:(T-1) is the same rows, stated explicitly.)
  A.ls <- matrix(0, 4, 4)
  V.star <- matrix(0, T - 1, 4)
  reg1 <- V.hat[1:(T - 1), 1]
  reg2 <- V.hat[1:(T - 1), 2]
  reg3 <- V.hat[1:(T - 1), 3]
  reg4 <- V.hat[1:(T - 1), 4]
  for (j in 1:4) {
    fit <- lm(V.hat[2:T, j] ~ -1 + reg1 + reg2 + reg3 + reg4)
    A.ls[j, ] <- as.numeric(fit$coef)
    V.star[, j] <- as.numeric(fit$resid)
  }
  # SVD adjustment of A&M (1992, p. 957): clamp singular values to
  # [-0.97, 0.97] so (I - A.hat) stays invertible and well conditioned.
  svd.A <- svd(A.ls)
  d.adj <- pmin(pmax(svd.A$d, -0.97), 0.97)
  A.hat <- svd.A$u %*% diag(d.adj) %*% t(svd.A$v)
  D <- solve(diag(4) - A.hat)
  # Recompute whitened residuals with the adjusted A.hat.
  reg.mat <- rbind(reg1, reg2, reg3, reg4)
  for (j in 1:4) {
    V.star[, j] <- V.hat[2:T, j] - A.hat[j, ] %*% reg.mat
  }
  Psi.hat <- compute.Psi.hat(V.star)
  # Recolor: D Psi D' undoes the prewhitening transformation.
  Psi.hat <- D %*% Psi.hat %*% t(D)
  as.numeric(sqrt(t(gradient) %*% Psi.hat %*% gradient / T))
}