| Overall Statistics |
|
Total Orders 828 Average Win 0.77% Average Loss -0.56% Compounding Annual Return 9.716% Drawdown 15.500% Expectancy 0.783 Start Equity 100000 End Equity 638419.69 Net Profit 538.420% Sharpe Ratio 0.525 Sortino Ratio 0.55 Probabilistic Sharpe Ratio 9.438% Loss Rate 25% Win Rate 75% Profit-Loss Ratio 1.37 Alpha 0.03 Beta 0.275 Annual Standard Deviation 0.091 Annual Variance 0.008 Information Ratio -0.127 Tracking Error 0.14 Treynor Ratio 0.173 Total Fees $3360.04 Estimated Strategy Capacity $0 Lowest Capacity Asset QQQ RIWIV7K5Z9LX Portfolio Turnover 2.08% Drawdown Recovery 940 |
# region imports
from AlgorithmImports import *
# endregion
from math import sqrt
from utils import (
compute_momentum_score,
compute_volatility,
inv_vol_weights,
clamp,
clean_history_close
)
class HyperSharpeAdaptiveAllocation(QCAlgorithm):
    """
    High-Sharpe ETF allocator with:
    - Regime filter (SPY trend + momentum)
    - Dual-momentum selection (12m + 3m blend)
    - Inverse-vol risk parity sizing
    - Volatility targeting (portfolio-level scaler)
    - Trade threshold + min order value to avoid 1-share/fee noise
    - Defensive crash response using drawdown from 6m high
    Uses ONLY liquid ETFs to reduce data gaps and microstructure noise.
    """

    def Initialize(self) -> None:
        """Set up cash, universe, parameters, warmup, and the monthly schedule."""
        self.SetStartDate(2006, 1, 1)
        self.SetCash(100000)
        # Safer in backtests (avoid tiny trades warnings and fee noise)
        self.Settings.MinimumOrderMarginPortfolioPercentage = 0.01  # ignore tiny target diffs
        self.Settings.RebalancePortfolioOnSecurityChanges = False
        self.Settings.RebalancePortfolioOnInsightChanges = False
        self.UniverseSettings.Resolution = Resolution.DAILY
        self.UniverseSettings.DataNormalizationMode = DataNormalizationMode.ADJUSTED
        # Core / Growth / Risk-on basket
        self.spy = self.AddEquity("SPY", Resolution.DAILY).Symbol
        self.qqq = self.AddEquity("QQQ", Resolution.DAILY).Symbol
        self.iwm = self.AddEquity("IWM", Resolution.DAILY).Symbol
        self.vt = self.AddEquity("VT", Resolution.DAILY).Symbol    # global core
        self.vrq = self.AddEquity("VNQ", Resolution.DAILY).Symbol  # real estate
        self.dbc = self.AddEquity("DBC", Resolution.DAILY).Symbol  # commodities trend proxy
        self.efa = self.AddEquity("EFA", Resolution.DAILY).Symbol  # developed ex-US
        self.eem = self.AddEquity("EEM", Resolution.DAILY).Symbol  # EM
        # Defensive basket
        self.tlt = self.AddEquity("TLT", Resolution.DAILY).Symbol  # long duration
        self.ief = self.AddEquity("IEF", Resolution.DAILY).Symbol  # intermediate duration
        self.shy = self.AddEquity("SHY", Resolution.DAILY).Symbol  # cash-like
        self.lqd = self.AddEquity("LQD", Resolution.DAILY).Symbol  # investment grade credit
        self.gld = self.AddEquity("GLD", Resolution.DAILY).Symbol  # gold
        self.risk_on_universe = [self.spy, self.qqq, self.iwm, self.vt, self.vrq, self.dbc, self.efa, self.eem]
        self.def_universe = [self.tlt, self.ief, self.shy, self.lqd, self.gld]
        # Parameters (tuned for stability, not "curve fit to one era")
        self.mom_lookback_12m = 252
        self.mom_lookback_3m = 63
        self.vol_lookback = 20
        self.trend_sma_len = 200
        self.dd_lookback = 126  # ~6 months
        # Portfolio behavior controls (these matter a lot for Sharpe)
        self.top_k_risk_on = 3
        self.top_k_defensive = 2
        self.max_asset_weight = 0.55       # prevent single-asset dominance
        self.min_trade_weight_change = 0.03  # skip tiny rebalances
        self.min_order_value = 1500        # skip tiny orders
        self.target_annual_vol = 0.10      # volatility targeting anchor (10% annual)
        # Warmup: need enough for SMA + momentum + drawdown + vol
        warmup_days = max(self.trend_sma_len, self.mom_lookback_12m, self.dd_lookback, self.vol_lookback) + 5
        self.SetWarmUp(warmup_days, Resolution.DAILY)
        # Monthly rebalance near close (daily data trades become MOO if after hours, QC warns about this)
        # So schedule shortly after market open to avoid MOO conversion warnings.
        self.Schedule.On(
            self.DateRules.MonthStart(self.spy),
            self.TimeRules.AfterMarketOpen(self.spy, 5),
            self.Rebalance
        )
        self.Debug(f"Init complete. Warmup days = {warmup_days}")

    def Rebalance(self) -> None:
        """Monthly rebalance: regime check, selection, sizing, scaling, execution."""
        if self.IsWarmingUp:
            return
        # Build history once (efficient and consistent)
        all_symbols = list(set(self.risk_on_universe + self.def_universe + [self.spy]))
        lookback = max(self.trend_sma_len, self.mom_lookback_12m, self.dd_lookback, self.vol_lookback) + 2
        hist = self.History(all_symbols, lookback, Resolution.DAILY)
        closes = clean_history_close(hist)
        if closes is None or closes.empty:
            self.Debug(f"{self.Time.date()} No history, skipping.")
            return
        # Regime: SPY trend + 12m momentum
        if self.spy not in closes.columns:
            self.Debug(f"{self.Time.date()} Missing SPY close, skipping.")
            return
        spy_series = closes[self.spy].dropna()
        if len(spy_series) < self.trend_sma_len + 2:
            self.Debug(f"{self.Time.date()} Not enough SPY data, skipping.")
            return
        spy_last = float(spy_series.iloc[-1])
        spy_sma200 = float(spy_series.iloc[-self.trend_sma_len:].mean())
        spy_mom12 = compute_momentum_score(spy_series, self.mom_lookback_12m)
        # BUG FIX: compute_momentum_score returns None when the series is too
        # short. The SMA length guard above only guarantees ~202 bars while the
        # 12m momentum needs 254, so spy_mom12 could be None and the comparison
        # (and the :.3f Debug format below) would raise. Treat missing momentum
        # as "not positive" instead of crashing.
        if spy_mom12 is None:
            spy_mom12 = 0.0
        risk_on_regime = (spy_last > spy_sma200) and (spy_mom12 > 0)
        # Crash response: drawdown from 6m high
        dd = self._drawdown_from_recent_high(spy_series, self.dd_lookback)
        # Select universe based on regime
        if risk_on_regime:
            selected = self._select_by_dual_momentum(closes, self.risk_on_universe, self.top_k_risk_on)
        else:
            selected = self._select_by_dual_momentum(closes, self.def_universe, self.top_k_defensive)
        if len(selected) == 0:
            # If nothing qualifies (rare), park in SHY
            selected = [self.shy]
        # Compute inverse-vol weights
        vols = {}
        for s in selected:
            series = closes[s].dropna() if s in closes.columns else None
            v = compute_volatility(series, self.vol_lookback)
            if v is None or v <= 0:
                continue
            vols[s] = v
        if len(vols) == 0:
            selected = [self.shy]
            vols = {self.shy: 0.01}
        base_weights = inv_vol_weights(vols)
        # Cap weights to avoid concentration (helps drawdown, often helps Sharpe).
        # NOTE(review): renormalizing after the cap can push a weight back above
        # the cap when few assets are held; kept as a deliberate simplification.
        capped = {s: min(w, self.max_asset_weight) for s, w in base_weights.items()}
        # Renormalize if caps reduced sum < 1
        ssum = sum(capped.values())
        if ssum <= 0:
            capped = {self.shy: 1.0}
        else:
            capped = {s: w / ssum for s, w in capped.items()}
        # Crash override: if drawdown is large, shift more to SHY/IEF and reduce risk
        # This is deliberately simple and robust.
        crash_scale = 1.0
        if dd >= 0.10:
            crash_scale = 0.75
        if dd >= 0.15:
            crash_scale = 0.55
        if dd >= 0.20:
            crash_scale = 0.40
        # Vol targeting scaler using realized vol of SPY as a crude market proxy
        spy_vol = compute_volatility(spy_series, self.vol_lookback)
        if spy_vol is None or spy_vol <= 0:
            vol_scale = 1.0
        else:
            # daily vol -> annual approx
            spy_annual_vol = spy_vol * sqrt(252.0)
            vol_scale = clamp(self.target_annual_vol / spy_annual_vol, 0.35, 1.25)
        total_scale = clamp(vol_scale * crash_scale, 0.25, 1.00)
        # Final targets
        targets = {s: w * total_scale for s, w in capped.items()}
        # Allocate leftover to SHY so total = 1.0 (no leverage; avoids buying power errors)
        used = sum(targets.values())
        cash_w = clamp(1.0 - used, 0.0, 1.0)
        targets[self.shy] = targets.get(self.shy, 0.0) + cash_w
        # Place trades with thresholding
        self._execute_targets(targets)
        self.Plot("Regime", "RiskOn", 1 if risk_on_regime else 0)
        self.Plot("Risk", "SpyDrawdown", dd)
        self.Plot("Risk", "TotalScale", total_scale)
        self.Debug(
            f"{self.Time.date()} Rebalance | RiskOn={risk_on_regime} "
            f"SPY={spy_last:.2f} SMA200={spy_sma200:.2f} Mom12={spy_mom12:.3f} DD6m={dd:.3f} "
            f"Scale={total_scale:.2f} Sel={len(selected)}"
        )

    def _select_by_dual_momentum(self, closes, universe, top_k: int):
        """Rank `universe` by a 70/30 blend of 12m and 3m momentum.

        Assets with both momenta non-positive are excluded; returns the
        top_k symbols by blended score (may be fewer, or empty).
        """
        scored = []
        for s in universe:
            if s not in closes.columns:
                continue
            series = closes[s].dropna()
            if len(series) < max(self.mom_lookback_12m, self.mom_lookback_3m) + 2:
                continue
            m12 = compute_momentum_score(series, self.mom_lookback_12m)
            m3 = compute_momentum_score(series, self.mom_lookback_3m)
            # Dual momentum blend (more weight to 12m to reduce churn)
            score = 0.7 * m12 + 0.3 * m3
            # Hard filter: require positive 3m OR positive 12m (prevents obvious losers)
            if (m12 <= 0) and (m3 <= 0):
                continue
            scored.append((score, s))
        scored.sort(reverse=True, key=lambda x: x[0])
        return [s for _, s in scored[:top_k]]

    def _drawdown_from_recent_high(self, series, lookback: int) -> float:
        """Fractional drawdown of the last price vs. the max over `lookback` bars.

        Returns 0.0 when data is insufficient or the peak is non-positive.
        """
        if series is None or len(series) < lookback + 2:
            return 0.0
        window = series.iloc[-lookback:]
        peak = float(window.max())
        last = float(window.iloc[-1])
        if peak <= 0:
            return 0.0
        return max(0.0, (peak - last) / peak)

    def _execute_targets(self, targets: dict) -> None:
        """Liquidate positions not in `targets`, then move holdings toward
        target weights, skipping changes below the weight / dollar thresholds.
        """
        # Liquidate anything not in targets (but do it safely)
        invested = [kv.Key for kv in self.Portfolio if self.Portfolio[kv.Key].Invested]
        for sym in invested:
            if sym not in targets:
                self.Liquidate(sym)
        # Place target trades with thresholding and minimum order value
        total_value = float(self.Portfolio.TotalPortfolioValue)
        for sym, tgt_w in targets.items():
            # (removed dead `if sym == self.spy: pass` no-op)
            tgt_w = clamp(tgt_w, 0.0, 1.0)
            cur_w = 0.0
            if self.Portfolio[sym].Invested and total_value > 0:
                cur_w = float(self.Portfolio[sym].HoldingsValue) / total_value
            delta = tgt_w - cur_w
            if abs(delta) < self.min_trade_weight_change:
                continue
            # Skip tiny order values
            if abs(delta) * total_value < self.min_order_value:
                continue
            # Use SetHoldings for simplicity and to reduce buying power mistakes
            self.SetHoldings(sym, tgt_w)
from AlgorithmImports import *
import numpy as np
def clamp(x: float, lo: float, hi: float) -> float:
    """Bound x to the closed interval [lo, hi]."""
    if x < lo:
        return lo
    if x > hi:
        return hi
    return x
def clean_history_close(history):
    """
    Convert QC History output into a wide close-price DataFrame.

    Columns are Symbols, rows are timestamps, values are close prices.
    Returns None when the input is missing, empty, or unparseable.
    """
    if history is None:
        return None
    try:
        frame = history
        if hasattr(frame, "empty") and frame.empty:
            return None
        # QC typically returns a (symbol, time) MultiIndex with a 'close'
        # column; pivot that series so each symbol becomes its own column.
        if "close" in frame.columns:
            return frame["close"].unstack(level=0)
        # Already wide (no 'close' column) -- hand it back untouched.
        return frame
    except Exception:
        # Best-effort parser: any unexpected shape is treated as "no data".
        return None
def compute_momentum_score(price_series, lookback: int):
    """Total return over the past `lookback` trading days.

    Returns (last / price lookback days ago) - 1; positive means uptrend.
    Returns None when the series is missing, too short, or the base price
    is non-positive.
    """
    if price_series is None or len(price_series) < lookback + 2:
        return None
    base = float(price_series.iloc[-(lookback + 1)])
    last = float(price_series.iloc[-1])
    if base <= 0:
        return None
    return (last / base) - 1.0
def compute_volatility(price_series, lookback: int):
"""
Realized daily volatility of log returns over lookback days.
Returns stdev (daily).
"""
if price_series is None:
return None
if len(price_series) < lookback + 2:
return None
px = np.array(price_series.iloc[-(lookback+1):], dtype=float)
if np.any(px <= 0):
return None
rets = np.diff(np.log(px))
if len(rets) < 2:
return None
return float(np.std(rets, ddof=1))
def inv_vol_weights(vols: dict):
    """Normalized inverse-volatility weights.

    vols maps symbol -> daily volatility. Entries with a missing or
    non-positive vol are dropped before normalization; if nothing survives,
    fall back to equal weights over the original keys.
    """
    inverse = {
        sym: 1.0 / vol
        for sym, vol in vols.items()
        if vol is not None and vol > 0
    }
    total = sum(inverse.values())
    if total <= 0:
        # fallback: equal weights across whatever was passed in
        count = max(1, len(vols))
        return {sym: 1.0 / count for sym in vols}
    return {sym: value / total for sym, value in inverse.items()}