risk_parity.py (forked from linzebing/inverse_volatility_caculation)
# https://quantdare.com/risk-parity-in-python/
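#
# Risk parity allocates capital so that every asset contributes the same
# amount of risk to the portfolio, rather than the same amount of capital.
# The script estimates an annualised covariance matrix from weekly returns
# and then solves for the equal-risk-contribution weights with a constrained
# SLSQP optimisation.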
import pandas as pd
import pandas_datareader.data as web
import numpy as np
import datetime
from scipy.optimize import minimize
TOLERANCE = 1e-10
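
# Portfolio risk is the usual volatility sqrt(w * Sigma * w'), where w is a
# 1xN weight matrix and Sigma is the NxN covariance matrix.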
def _allocation_risk(weights, covariances):

    # We calculate the risk of the weights distribution
    portfolio_risk = np.sqrt((weights * covariances * weights.T))[0, 0]

    # It returns the risk of the weights distribution
    return portfolio_risk
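
# Euler decomposition of volatility: asset i contributes
# w_i * (Sigma * w)_i / portfolio_risk, and these contributions sum to the
# total portfolio risk.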
def _assets_risk_contribution_to_allocation_risk(weights, covariances):

    # We calculate the risk of the weights distribution
    portfolio_risk = _allocation_risk(weights, covariances)

    # We calculate the contribution of each asset to the risk of the weights
    # distribution
    assets_risk_contribution = np.multiply(weights.T, covariances * weights.T) \
        / portfolio_risk

    # It returns the contribution of each asset to the risk of the weights
    # distribution
    return assets_risk_contribution
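
# The optimisation objective is the sum of squared differences between each
# asset's actual risk contribution and its targeted share of the portfolio
# risk (risk budget * portfolio risk); it is zero exactly at the desired
# risk-budget allocation.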
def _risk_budget_objective_error(weights, args):

    # The covariance matrix occupies the first position in the variable
    covariances = args[0]

    # The desired contribution of each asset to the portfolio risk occupies the
    # second position
    assets_risk_budget = args[1]

    # We convert the weights to a matrix
    weights = np.matrix(weights)

    # We calculate the risk of the weights distribution
    portfolio_risk = _allocation_risk(weights, covariances)

    # We calculate the contribution of each asset to the risk of the weights
    # distribution
    assets_risk_contribution = \
        _assets_risk_contribution_to_allocation_risk(weights, covariances)

    # We calculate the desired contribution of each asset to the risk of the
    # weights distribution
    assets_risk_target = \
        np.asmatrix(np.multiply(portfolio_risk, assets_risk_budget))

    # Error between the desired contribution and the calculated contribution of
    # each asset
    error = \
        sum(np.square(assets_risk_contribution - assets_risk_target.T))[0, 0]

    # It returns the calculated error
    return error
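
# The weights are obtained with SLSQP, minimising the error above subject to
# a fully-invested constraint (weights sum to 1) and a long-only constraint
# (all weights non-negative).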
def _get_risk_parity_weights(covariances, assets_risk_budget, initial_weights):

    # Restrictions to consider in the optimisation: only long positions whose
    # sum equals 100%
    constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0},
                   {'type': 'ineq', 'fun': lambda x: x})

    # Optimisation process in scipy
    optimize_result = minimize(fun=_risk_budget_objective_error,
                               x0=initial_weights,
                               args=[covariances, assets_risk_budget],
                               method='SLSQP',
                               constraints=constraints,
                               tol=TOLERANCE,
                               options={'disp': False})

    # Recover the weights from the optimised object
    weights = optimize_result.x

    # It returns the optimised weights
    return weights
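
# get_weights downloads adjusted close prices, builds the covariance matrix
# from weekly (Friday-to-Friday) returns annualised by a factor of 52, and
# targets an equal 1/N risk contribution for every asset. Note that the
# 'yahoo' source of pandas-datareader has been unreliable in recent years;
# if the download fails, a library such as yfinance can supply the same
# adjusted-close prices.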
def get_weights(yahoo_tickers=['GOOGL', 'AAPL', 'AMZN'],
                start_date=datetime.datetime(2016, 10, 31),
                end_date=datetime.datetime(2017, 10, 31)):

    # We download the prices from Yahoo Finance
    prices = pd.DataFrame([web.DataReader(t,
                                          'yahoo',
                                          start_date,
                                          end_date).loc[:, 'Adj Close']
                           for t in yahoo_tickers],
                          index=yahoo_tickers).T.asfreq('B').ffill()

    # We calculate the covariance matrix
    covariances = 52.0 * \
        prices.asfreq('W-FRI').pct_change().iloc[1:, :].cov().values

    # The desired contribution of each asset to the portfolio risk: we want
    # all assets to contribute equally
    assets_risk_budget = [1 / prices.shape[1]] * prices.shape[1]

    # Initial weights: equally weighted
    init_weights = [1 / prices.shape[1]] * prices.shape[1]

    # Optimisation process of weights
    weights = \
        _get_risk_parity_weights(covariances, assets_risk_budget, init_weights)

    # Convert the weights to a pandas Series
    weights = pd.Series(weights, index=prices.columns, name='weight')

    # It returns the optimised weights
    return weights
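
# Example: three S&P 500 funds with different leverage, SPXL (3x), SSO (2x)
# and VOO (unleveraged). Risk parity should assign the smallest weight to the
# most leveraged, most volatile fund.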
symbols = ['SPXL', 'SSO', 'VOO']
w = get_weights(yahoo_tickers=symbols,
                start_date=datetime.datetime(2020, 4, 1),
                end_date=datetime.datetime(2021, 12, 31))
print(w)
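
# A minimal offline sanity check (an addition, not part of the original
# script): with a diagonal covariance matrix, equal risk contributions imply
# weights proportional to inverse volatility, so volatilities of 10% and 20%
# should give weights close to [2/3, 1/3].
check_cov = np.array([[0.10 ** 2, 0.00],
                      [0.00, 0.20 ** 2]])
print(_get_risk_parity_weights(check_cov, [0.5, 0.5], [0.5, 0.5]))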