Hey everyone!

Attached below is my code. It worked well in the Quant book, but once I put it into my algorithm and run a backtest, it keeps telling me the data coming from the `_black_litterman_` function *"must be 1 dimensional"*, even though I'm fairly sure it is. Can someone help me understand the problem and why it keeps failing during backtesting?

NOTE: The problematic line seems to be line 92, the one starting *`mu_bl = …`*. A small shape-checking helper I plan to use is just below, followed by the full algorithm.
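In case it helps, this is the kind of check I intend to run on the inputs to that line (`assert_1d` is a hypothetical helper name, not part of the algorithm below; I'd call it on `pi`, `q`, and `q - p.dot(pi)` right before `mu_bl` is computed):

```python
import numpy as np

# Hypothetical debugging helper, not part of the algorithm itself.
def assert_1d(name, x):
    arr = np.asarray(x)
    print(name, arr.shape)  # inside QC I'd route this through self.Debug
    assert arr.ndim == 1, f"{name} is {arr.ndim}-D, expected 1-D"
```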

```python
import pandas as pd
import numpy as np
from scipy.optimize import minimize
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import GridSearchCV
from numpy.linalg import inv
from AlgorithmImports import *
from datetime import datetime, timedelta
from io import StringIO
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
from arch import arch_model


class ghostyfund(QCAlgorithm):

    def Initialize(self):
        self.SetStartDate(2020, 1, 1)
        self.SetEndDate(2021, 1, 1)
        self.SetCash(10000)
        self.UniverseSettings.Resolution = Resolution.Daily
        self.AddUniverse(self.CoarseSelectionFunction, self.FineSelectionFunction)
        self.UniverseSettings.Leverage = 3
        self._changes = None

    def implied_returns(self, delta, sigma, w):
        """
        Inputs:
        delta = risk aversion coefficient (scalar): excess return on the market / variance of the market
        sigma = N x N covariance matrix as a DataFrame
        w = portfolio weights (N x 1) as a Series
        Returns an N x 1 vector of implied expected returns, reverse-engineered from the portfolio weights
        """
        ir = delta * sigma.values.dot(w).squeeze()
        return ir
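    # For intuition (comment only, made-up numbers): with delta = 2.5,
    # sigma = [[0.04, 0.01], [0.01, 0.09]] and w = [0.5, 0.5],
    # sigma.dot(w) = [0.025, 0.05], so pi = 2.5 * [0.025, 0.05]
    # = [0.0625, 0.125] -- a 1-D array of length N.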

    def proportional_prior(self, sigma, tau, p, index):
        """
        Inputs:
        sigma = N x N covariance matrix as a DataFrame
        tau = a scalar
        p = a K x N DataFrame linking Q and the assets
        index = labels for the resulting DataFrame
        Returns a K x K DataFrame representing the prior uncertainties
        """
        helit_omega = p.values.dot(tau * sigma.values).dot(p.values.T)
        # keep only the diagonal of helit_omega
        return pd.DataFrame(np.diag(np.diag(helit_omega)), index=index, columns=index)
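    # Comment only: in black_litty_implied_rets below, p is the identity
    # matrix (one absolute view per asset), so helit_omega = tau * sigma and
    # this reduces to a diagonal K x K matrix (with K == N in that case).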

    def _black_litterman_(self, w_prior, sigma_prior, p, p_not_values, q, omega=None, delta=2.5, tau=0.2):
        """
        Computes the Black-Litterman expected returns.
        w_prior = N x 1 vector of weights, a Series
        sigma_prior = N x N covariance matrix, a DataFrame
        p = K x N matrix linking Q and the assets, as a numpy array
        p_not_values = the same K x N matrix, as a DataFrame
        q = K x 1 vector of views, a Series
        omega = K x K matrix, a DataFrame; if omega is None, it is set
        proportional to the variance of the prior
        delta and tau are scalars
        """
        index = sigma_prior.index

        def as_column_vec(x):
            # (defined but not used below)
            if x.ndim == 2:
                return x
            else:
                return np.expand_dims(x, axis=1)

        if omega is None:
            omega = self.proportional_prior(sigma_prior, tau, p_not_values, index)
        # number of assets
        N = w_prior.shape[0]
        # number of views
        K = q.shape[0]
        # 1) reverse-engineer the weights to get pi
        pi = self.implied_returns(delta, sigma_prior, w_prior)
        # 2) scale sigma by the uncertainty factor tau
        sigma_prior_scaled = sigma_prior.values * tau
        sigma_prior = sigma_prior.values
        # note: .dot == matrix multiplication
        mu_bl = pi + sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega).dot(q - p.dot(pi)))
        # posterior estimate of the uncertainty of mu_bl
        sigma_bl = sigma_prior + sigma_prior_scaled - sigma_prior_scaled.dot(p.T).dot(inv(p.dot(sigma_prior_scaled).dot(p.T) + omega)).dot(p).dot(sigma_prior_scaled)
        return (mu_bl, sigma_bl)
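    # Comment only -- as I understand the textbook formula, mu_bl above is:
    #   mu_bl = pi + tau*Sigma @ P.T @ inv(P @ tau*Sigma @ P.T + Omega) @ (Q - P @ pi)
    # with shapes pi: (N,), tau*Sigma: (N, N), P: (K, N), Omega: (K, K) and
    # (Q - P @ pi): (K,), so the result should be a 1-D array of length N.
    # If any input arrives 2-D (e.g. q as a (K, 1) frame, or pi as (N, 1)),
    # mu_bl comes out 2-D, which is my best guess at the backtest error.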

    def black_litty_implied_rets(self, tickers):
        # one absolute view of 3% expected return per asset
        q = pd.Series(index=tickers, data=[0.03] * len(tickers))
        p = pd.DataFrame(columns=tickers, index=range(0, len(tickers)))
        # equal-weight prior portfolio
        w = pd.Series(1 / len(tickers), index=tickers)
        delta = 2.5
        tau = .02
        # build p as the identity matrix: view i is about asset i only
        for i in range(0, len(p.columns)):
            p.iloc[i, i] = 1
        p = p.fillna(0)
        history = self.History(tickers, 60, Resolution.Daily)["close"]
        sigma_prior = history.pct_change().unstack(level=0).dropna().cov()
        index = sigma_prior.index
        # note: _black_litterman_ recomputes pi and omega itself, so these
        # two locals are informational only
        pi = self.implied_returns(delta, sigma_prior, w)
        omega = self.proportional_prior(sigma_prior, tau, p, index)
        bl = self._black_litterman_(w_prior=w, sigma_prior=sigma_prior, p=p.values, p_not_values=p, q=q)
        df = pd.DataFrame(bl[0])
        df["Ticker"] = tickers
        df = df.pivot_table(columns="Ticker")
        exp_rets = df.sort_values(by=0, ascending=False, axis=1)
        #highest_exp_ret = exp_rets.iloc[0].index[0]
        return exp_rets

    def monte_carlo_gbm(self, n_years, n_scenarios, mu, sigma, steps_per_year, s_0):
        """
        Evolution of Geometric Brownian Motion trajectories, e.g. for stock
        prices, via Monte Carlo
        :param n_years: the number of years to generate data for
        :param n_scenarios: the number of scenarios/trajectories
        :param mu: annualized drift, e.g. market return
        :param sigma: annualized volatility
        :param steps_per_year: granularity of the simulation
        :param s_0: initial value
        :return: a DataFrame with n_scenarios columns and n_years * steps_per_year + 1 rows
        """
        dt = 1 / steps_per_year
        n_steps = int(n_years * steps_per_year) + 1
        rets_plus_1 = np.random.normal(loc=mu * dt + 1, scale=sigma * np.sqrt(dt), size=(n_steps, n_scenarios))
        rets_plus_1[0] = 1
        prices = s_0 * pd.DataFrame(rets_plus_1).cumprod()
        return prices
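    # Comment only: e.g. monte_carlo_gbm(n_years=1, n_scenarios=1000, mu=0.07,
    # sigma=0.15, steps_per_year=252, s_0=100) returns a (253, 1000) DataFrame
    # of simulated price paths, every path starting at 100.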

    def get_max_drawdown(self, array):
        # note: this is the maximum *absolute* (price-level) drawdown from
        # the running peak, not a percentage drawdown
        drawdowns = []
        for i in range(len(array)):
            max_array = max(array[:i + 1])
            drawdown = max_array - array[i]
            drawdowns.append(drawdown)
        return max(drawdowns)
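    # Comment only: for prices [100, 110, 105] the running peaks are
    # [100, 110, 110], the drawdowns [0, 0, 5], so get_max_drawdown returns 5.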

    def CoarseSelectionFunction(self, coarse):
        sortedByDollarVolume = sorted(coarse, key=lambda x: x.DollarVolume, reverse=True)
        return [x.Symbol for x in sortedByDollarVolume]

    def FineSelectionFunction(self, fine):
        sortedByPeRatio = sorted(fine, key=lambda x: x.ValuationRatios.PERatio)
        tickers = [x.Symbol for x in sortedByPeRatio][:5]
        return tickers

    def OnData(self, data):
        if self._changes is None:
            return
        tickers = [x.Symbol for x in self._changes.AddedSecurities]
        mu = self.black_litty_implied_rets(tickers)
        iteration = 0
        avg_drawdowns = []
        for ticker in tickers:
            symbol = self.AddEquity(ticker).Symbol
            symbol_hist = self.History(symbol, 252, Resolution.Daily)["close"]
            returns = symbol_hist.pct_change().dropna().unstack(level=0)
            vol = returns.rolling(window=30, center=False).std().dropna()
            # fit a GARCH(1, 1) model to the rolling volatility
            am = arch_model(vol, vol='Garch', p=1, o=0, q=1, dist='Normal')
            res = am.fit(disp='off')
            garch_implied_vol = 0.1 * np.sqrt(res.params['omega'] + res.params['alpha[1]'] * res.resid**2
                                              + res.conditional_volatility**2 * res.params['beta[1]'])
            # simulate GBM paths using the BL expected return and GARCH vol
            mc = self.monte_carlo_gbm(n_years=1, n_scenarios=2500, mu=mu.iloc[:, iteration],
                                      sigma=garch_implied_vol.max(), steps_per_year=252, s_0=100)
            drawdowns = []
            for i in range(0, len(mc.columns)):
                drawdown = self.get_max_drawdown(mc[i])
                drawdowns.append(drawdown)
            avg_drawd = np.mean(drawdowns)
            avg_drawdowns.append(avg_drawd)
            iteration += 1
        df = pd.DataFrame(avg_drawdowns)
        df["Ticker"] = tickers
        df = df.pivot_table(columns="Ticker")
        expected_drawdowns = df.sort_values(by=0, ascending=True, axis=1)
        # go all-in on the ticker with the smallest average simulated drawdown
        ticker_symbol = expected_drawdowns.columns[0]
        self.SetHoldings(ticker_symbol, 1)
        self._changes = None

    def OnSecuritiesChanged(self, changes):
        self._changes = changes
```
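For what it's worth, as far as I can tell the maths itself behaves outside of QC. Here is a minimal standalone sketch of the same update with made-up tickers and covariance numbers (everything below is dummy data, nothing from my universe), where `mu_bl` does come out 1-dimensional:

```python
import numpy as np
import pandas as pd
from numpy.linalg import inv

# All tickers and numbers below are made up, purely to check shapes.
tickers = ["AAA", "BBB", "CCC"]
N = len(tickers)

sigma_prior = pd.DataFrame([[0.04, 0.01, 0.00],
                            [0.01, 0.09, 0.02],
                            [0.00, 0.02, 0.16]],
                           index=tickers, columns=tickers)
w = pd.Series(1 / N, index=tickers)     # equal-weight prior
q = pd.Series(0.03, index=tickers)      # one absolute 3% view per asset
p = np.eye(N)                           # identity link matrix (K == N)
delta, tau = 2.5, 0.02

pi = delta * sigma_prior.values.dot(w).squeeze()           # (N,)
sigma_scaled = tau * sigma_prior.values                    # (N, N)
omega = np.diag(np.diag(p.dot(sigma_scaled).dot(p.T)))     # (K, K)

mu_bl = pi + sigma_scaled.dot(p.T).dot(
    inv(p.dot(sigma_scaled).dot(p.T) + omega).dot(q - p.dot(pi)))

print(mu_bl.shape)  # (3,) -- 1-D, as the error message demands
```

So my suspicion is that one of the inputs built from `self.History` during backtesting (the covariance frame, `q`, or `p`) is picking up an extra dimension or a mismatched index, but I can't see where.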