# GitHub Repository: braverock/portfolioanalytics
# Path: blob/master/sandbox/paper_analysis/R_interpretation/Sharpe_functions_Wolf.R

diffSharpeRatio <-
function(pr1, pr2, rebalweights1, rebalweights2, C = 0.0001, initialV = 1000){
  # Build each strategy's wealth path net of proportional transaction costs C,
  # then test the difference of the two Sharpe ratios via HAC inference.
  vwealth1 = vwealth2 = initialV*(1-C)
  for( t in 1:length(pr1) ){
    # C should ideally depend on the change in weights, i.e. be applied at the
    # asset level rather than at the portfolio level (see the sketch after this function)
    vwealth1 = c( vwealth1, vwealth1[t]*(1+pr1[t])*( 1 - C*sum(abs(rebalweights1[t])) ) )
    vwealth2 = c( vwealth2, vwealth2[t]*(1+pr2[t])*( 1 - C*sum(abs(rebalweights2[t])) ) )
  }
  # simple returns of the two wealth paths
  vret1 = (vwealth1[-1] - vwealth1[1:(length(vwealth1)-1)]) / vwealth1[1:(length(vwealth1)-1)]
  vret2 = (vwealth2[-1] - vwealth2[1:(length(vwealth2)-1)]) / vwealth2[1:(length(vwealth2)-1)]
  #out = boot.time.inference( ret = cbind( vret1_mon , vret2_mon ) , b = 4 , M = 1000 )
  out = hac.inference(ret = cbind(vret1, vret2))

  return( c(out$Difference, out$p.Values[1]) )
}
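
# The comment inside diffSharpeRatio() notes that transaction costs should really
# be charged per asset as a function of the change in weights, not with a single
# portfolio-level C. A minimal sketch of that idea (hypothetical helper, not part
# of the original code): C.vec holds one proportional cost rate per asset.
assetLevelCost <- function(delta.weights, C.vec){
  # total cost of a rebalance given the per-asset weight changes
  sum(C.vec * abs(delta.weights))
}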

boot.time.inference <-
function(ret, b, M, Delta.null = 0, digits = 4){
  # Studentized circular block bootstrap test of H0: Delta = Delta.null, where
  # Delta is the difference of the two Sharpe ratios; see Ledoit & Wolf (2008).
  T = length(ret[,1])
  l = floor(T / b)
  Delta.hat = sharpe.ratio.diff(ret)
  d = abs(Delta.hat - Delta.null) / compute.se.Parzen.pw(ret)
  p.value = 1
  for (m in (1:M)) {
    ret.star = ret[cbb.sequence(T, b),]
    Delta.hat.star = sharpe.ratio.diff(ret.star)
    ret1.star = ret.star[,1]
    ret2.star = ret.star[,2]
    mu1.hat.star = mean(ret1.star)
    mu2.hat.star = mean(ret2.star)
    gamma1.hat.star = mean(ret1.star^2)
    gamma2.hat.star = mean(ret2.star^2)
    # delta-method gradient of SR1 - SR2 with respect to (mu1, mu2, gamma1, gamma2)
    gradient = rep(0, 4)
    gradient[1] = gamma1.hat.star / (gamma1.hat.star - mu1.hat.star^2)^1.5
    gradient[2] = - gamma2.hat.star / (gamma2.hat.star - mu2.hat.star^2)^1.5
    gradient[3] = -0.5 * mu1.hat.star / (gamma1.hat.star - mu1.hat.star^2)^1.5
    gradient[4] = 0.5 * mu2.hat.star / (gamma2.hat.star - mu2.hat.star^2)^1.5
    y.star = data.frame(ret1.star - mu1.hat.star, ret2.star - mu2.hat.star,
                        ret1.star^2 - gamma1.hat.star, ret2.star^2 - gamma2.hat.star)
    Psi.hat.star = matrix(0, 4, 4)
    for (j in (1:l)) {
      zeta.star = b^0.5 * colMeans(y.star[((j-1)*b+1):(j*b),])
      Psi.hat.star = Psi.hat.star + zeta.star %*% t(zeta.star)
    }
    Psi.hat.star = Psi.hat.star / l
    se.star = as.numeric(sqrt(t(gradient) %*% Psi.hat.star %*% gradient / T))
    d.star = abs(Delta.hat.star - Delta.hat) / se.star
    if (d.star >= d) {
      p.value = p.value + 1
    }
  }
  p.value = p.value / (M + 1)
  list(Difference = round(Delta.hat, digits), p.Value = round(p.value, digits))
}
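
# cbb.sequence() is used above but is not defined in this file; it belongs to
# the companion code of Ledoit & Wolf (2008). The definition below is a minimal
# sketch, assuming the standard circular block bootstrap: draw ceiling(T/b)
# random block starts, take b consecutive (circularly wrapped) indices from
# each, and truncate the concatenated sequence to length T.
cbb.sequence <- function(T, b){
  l = ceiling(T / b)
  start.points = sample(1:T, l, replace = TRUE)        # random block starts
  index.sequence = c()
  for (j in 1:l) {
    block = (start.points[j] - 1 + 0:(b-1)) %% T + 1   # wrap around the sample
    index.sequence = c(index.sequence, block)
  }
  index.sequence[1:T]
}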

sharpe.ratio.diff <- function(ret){
  ret1 = ret[,1]
  ret2 = ret[,2]
  mu1.hat = mean(ret1)
  mu2.hat = mean(ret2)
  sig1.hat = var(ret1)^.5
  sig2.hat = var(ret2)^.5
  SR1.hat = mu1.hat / sig1.hat
  SR2.hat = mu2.hat / sig2.hat
  diff = SR1.hat - SR2.hat
  diff
}

hac.inference <-
function(ret, digits = 3) {
  # Two-sided test of equal Sharpe ratios using HAC standard errors
  # (plain and prewhitened Parzen kernel estimators).
  ret1 = ret[,1]
  ret2 = ret[,2]
  mu1.hat = mean(ret1)
  mu2.hat = mean(ret2)
  sig1.hat = var(ret1)^.5
  sig2.hat = var(ret2)^.5
  SR1.hat = mu1.hat / sig1.hat
  SR2.hat = mu2.hat / sig2.hat
  SRs = round(c(SR1.hat, SR2.hat), digits)
  names(SRs) = c("SR1.hat", "SR2.hat")
  diff = SR1.hat - SR2.hat
  se = compute.se.Parzen(ret)
  se.pw = compute.se.Parzen.pw(ret)
  SEs = round(c(se, se.pw), digits)
  names(SEs) = c("HAC", "HAC.pw")
  PV = 2 * pnorm(-abs(diff) / se)
  PV.pw = 2 * pnorm(-abs(diff) / se.pw)
  PVs = round(c(PV, PV.pw), digits)
  names(PVs) = c("HAC", "HAC.pw")
  list(Sharpe.Ratios = SRs, Difference = round(diff, digits),
       Standard.Errors = SEs, p.Values = PVs)
}

compute.se.Parzen <-
function(ret){
  # implements the Parzen kernel HAC estimator with automatic choice of bandwidth
  ret1 = ret[,1]
  ret2 = ret[,2]
  T = length(ret1)
  mu1.hat = mean(ret1)
  mu2.hat = mean(ret2)
  gamma1.hat = mean(ret1^2)
  gamma2.hat = mean(ret2^2)
  # delta-method gradient of SR1 - SR2 with respect to (mu1, mu2, gamma1, gamma2)
  gradient = rep(0, 4)
  gradient[1] = gamma1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[2] = - gamma2.hat / (gamma2.hat - mu2.hat^2)^1.5
  gradient[3] = -0.5 * mu1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[4] = 0.5 * mu2.hat / (gamma2.hat - mu2.hat^2)^1.5
  V.hat = compute.V.hat(ret)
  Psi.hat = compute.Psi.hat(V.hat)
  se = as.numeric(sqrt(t(gradient) %*% Psi.hat %*% gradient / T))
  se
}

block.size.calibrate <-
function(ret, b.vec = c(1, 3, 6, 10), alpha = 0.05, M = 199, K = 1000, b.av = 5, T.start = 50){
  # Calibrate the block size b for boot.time.inference(): fit a VAR(1) to the
  # returns, simulate K semi-parametric pseudo-samples, and pick the b whose
  # empirical rejection probability is closest to the nominal level alpha.
  b.len = length(b.vec)
  emp.reject.probs = rep(0, b.len)
  Delta.hat = sharpe.ratio.diff(ret)
  ret1 = ret[,1]
  ret2 = ret[,2]
  T = length(ret1)
  Var.data = matrix(0, T.start + T, 2)
  #Var.data[1, ] = ret[1, ]
  Var.data[1, ] = as.numeric(ret[1, ])
  fit1 = lm(ret1[2:T] ~ ret1[1:(T-1)] + ret2[1:(T-1)])
  fit2 = lm(ret2[2:T] ~ ret1[1:(T-1)] + ret2[1:(T-1)])
  coef1 = as.numeric(fit1$coef)
  coef2 = as.numeric(fit2$coef)
  resid.mat = cbind(as.numeric(fit1$resid), as.numeric(fit2$resid))
  for (k in (1:K)) {
    # resample the VAR residuals with the stationary bootstrap
    resid.mat.star = rbind(c(0,0), resid.mat[sb.sequence(T-1, b.av, T.start+T-1),])
    # print(resid.mat.star)   # debug output
    for (t in (2:(T.start+T))) {
      Var.data[t, 1] = coef1[1] + coef1[2]*Var.data[t-1,1] +
        coef1[3]*Var.data[t-1,2] + resid.mat.star[t,1]
      Var.data[t, 2] = coef2[1] + coef2[2]*Var.data[t-1,1] +
        coef2[3]*Var.data[t-1,2] + resid.mat.star[t,2]
    }
    # print(Var.data)
    # discard the burn-in period
    Var.data.trunc = Var.data[(T.start+1):(T.start+T), ]
    for (j in (1:b.len)) {
      p.Value = boot.time.inference(Var.data.trunc, b.vec[j], M, Delta.hat)$p.Value
      if (p.Value <= alpha) {
        emp.reject.probs[j] = emp.reject.probs[j] + 1
      }
    }
  }
  emp.reject.probs = emp.reject.probs / K
  b.order = order(abs(emp.reject.probs - alpha))
  b.opt = b.vec[b.order[1]]
  b.vec.with.probs = rbind(b.vec, emp.reject.probs)
  colnames(b.vec.with.probs) = rep("", length(b.vec))
  list(Empirical.Rejection.Probs = b.vec.with.probs, b.optimal = b.opt)
}
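
# sb.sequence() is also taken from the Ledoit & Wolf companion code and is not
# defined in this file. The call above needs an index sequence of length
# T.start+T-1 drawn from 1:(T-1). A minimal sketch, assuming the stationary
# bootstrap of Politis & Romano (1994): block starts are uniform on 1:T and
# block lengths are geometric with mean b.av, with circular wrapping.
sb.sequence <- function(T, b.av, length.out = T){
  index.sequence = c()
  while (length(index.sequence) < length.out) {
    start = sample(1:T, 1)                       # uniform block start
    len = rgeom(1, 1/b.av) + 1                   # geometric block length, mean b.av
    block = (start - 1 + 0:(len-1)) %% T + 1     # wrap around the sample
    index.sequence = c(index.sequence, block)
  }
  index.sequence[1:length.out]
}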

compute.V.hat <-
function(ret){
  # what Andrews (1991) calls V.hat = V(theta.hat) in our context
  ret1 = ret[,1]
  ret2 = ret[,2]
  V.hat = cbind(ret1 - mean(ret1), ret2 - mean(ret2),
                ret1^2 - mean(ret1^2), ret2^2 - mean(ret2^2))
  V.hat
}

compute.Psi.hat <-
function(V.hat) {
  # see first part of (2.5) of Andrews (1991),
  # except we call Psi.hat what he calls J.hat there
  T = length(V.hat[,1])
  alpha.hat = compute.alpha.hat(V.hat)
  S.star = 2.6614 * (alpha.hat * T)^.2
  Psi.hat = compute.Gamma.hat(V.hat, 0)
  j = 1
  while (j < S.star) {
    Gamma.hat = compute.Gamma.hat(V.hat, j)
    Psi.hat = Psi.hat + kernel.Parzen(j / S.star) * (Gamma.hat + t(Gamma.hat))
    j = j + 1
  }
  Psi.hat = (T / (T - 4)) * Psi.hat
  Psi.hat
}

compute.alpha.hat <-
function(V.hat){
  # see (6.4) of Andrews (1991)
  dimensions = dim(V.hat)
  T = dimensions[1]
  p = dimensions[2]
  numerator = 0
  denominator = 0
  for (i in (1:p)) {
    fit = ar(V.hat[, i], 0, 1, method = "ols")
    rho.hat = as.numeric(fit[2])
    sig.hat = sqrt(as.numeric(fit[3]))
    numerator = numerator + 4 * rho.hat^2 * sig.hat^4 / (1 - rho.hat)^8
    denominator = denominator + sig.hat^4 / (1 - rho.hat)^4
  }
  numerator / denominator
}

compute.Gamma.hat <-
function(V.hat, j){
  # see second part of (2.5) of Andrews (1991)
  dimensions = dim(V.hat)
  T = dimensions[1]
  p = dimensions[2]
  Gamma.hat = matrix(0, p, p)
  if (j >= T)
    stop("j must be smaller than the row dimension!")
  for (i in ((j+1):T))
    Gamma.hat = Gamma.hat + V.hat[i,] %*% t(V.hat[i - j,])
  Gamma.hat = Gamma.hat / T
  Gamma.hat
}

kernel.Parzen <-
function(x)
{
  # Parzen kernel weights: 1 - 6x^2 + 6|x|^3 for |x| <= 0.5,
  # 2(1 - |x|)^3 for 0.5 < |x| <= 1, and 0 otherwise
  if (abs(x) <= 0.5)
    result = 1 - 6 * x^2 + 6 * abs(x)^3
  else if (abs(x) <= 1)
    result = 2 * (1 - abs(x))^3
  else
    result = 0
  result
}

compute.se.Parzen.pw <-
function(ret){
  # implements the prewhitened Parzen kernel estimator of Andrews & Monahan (1992)
  ret1 = ret[,1]
  ret2 = ret[,2]
  mu1.hat = mean(ret1)
  mu2.hat = mean(ret2)
  gamma1.hat = mean(ret1^2)
  gamma2.hat = mean(ret2^2)
  # delta-method gradient of SR1 - SR2 with respect to (mu1, mu2, gamma1, gamma2)
  gradient = rep(0, 4)
  gradient[1] = gamma1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[2] = - gamma2.hat / (gamma2.hat - mu2.hat^2)^1.5
  gradient[3] = -0.5 * mu1.hat / (gamma1.hat - mu1.hat^2)^1.5
  gradient[4] = 0.5 * mu2.hat / (gamma2.hat - mu2.hat^2)^1.5
  T = length(ret1)
  V.hat = compute.V.hat(ret)
  # prewhitening: fit a VAR(1) to V.hat by least squares
  A.ls = matrix(0, 4, 4)
  V.star = matrix(0, T - 1, 4)
  reg1 = V.hat[1:(T-1),1]
  reg2 = V.hat[1:(T-1),2]
  reg3 = V.hat[1:(T-1),3]
  reg4 = V.hat[1:(T-1),4]
  for (j in (1:4)) {
    fit = lm(V.hat[2:T,j] ~ -1 + reg1 + reg2 + reg3 + reg4)
    A.ls[j,] = as.numeric(fit$coef)
    V.star[,j] = as.numeric(fit$resid)
  }
  # SVD adjustment of Andrews & Monahan (1992, page 957)
  svd.A = svd(A.ls)
  d = svd.A$d
  d.adj = d
  for (i in (1:4)) {
    if (d[i] > 0.97)
      d.adj[i] = 0.97
    else if (d[i] < -0.97)
      d.adj[i] = -0.97
  }
  A.hat = svd.A$u %*% diag(d.adj) %*% t(svd.A$v)
  D = solve(diag(4) - A.hat)
  # recompute the prewhitened residuals with the adjusted coefficient matrix
  reg.mat = rbind(reg1, reg2, reg3, reg4)
  for (j in (1:4)) {
    V.star[,j] = V.hat[2:T,j] - A.hat[j,] %*% reg.mat
  }
  Psi.hat = compute.Psi.hat(V.star)
  Psi.hat = D %*% Psi.hat %*% t(D)   # recolor
  se = as.numeric(sqrt(t(gradient) %*% Psi.hat %*% gradient / T))
  se
}
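
# Illustrative usage (not part of the original file): simulate two correlated
# return series and compare their Sharpe ratios with both the HAC test and the
# studentized circular block bootstrap. Data and parameter values here are
# arbitrary; only the function signatures defined above are assumed. The example
# is wrapped in if (FALSE) so sourcing this file does not run it (the
# calibration step is computationally heavy).
if (FALSE) {
  set.seed(42)
  T.obs = 120
  common = rnorm(T.obs, 0, 0.03)
  ret1 = 0.006 + common + rnorm(T.obs, 0, 0.02)
  ret2 = 0.004 + common + rnorm(T.obs, 0, 0.02)
  ret = cbind(ret1, ret2)

  hac.inference(ret)                                             # HAC standard errors and p-values
  b.opt = block.size.calibrate(ret, M = 99, K = 100)$b.optimal   # calibrate the block size
  boot.time.inference(ret, b = b.opt, M = 999)                   # bootstrap p-value
}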