| Overall Statistics | |
| --- | --- |
| Total Orders | 5910 |
| Average Win | 1.03% |
| Average Loss | -0.82% |
| Compounding Annual Return | 15.678% |
| Drawdown | 31.000% |
| Expectancy | 0.141 |
| Start Equity | 100000 |
| End Equity | 1378200.46 |
| Net Profit | 1278.200% |
| Sharpe Ratio | 0.57 |
| Sortino Ratio | 0.622 |
| Probabilistic Sharpe Ratio | 2.862% |
| Loss Rate | 49% |
| Win Rate | 51% |
| Profit-Loss Ratio | 1.25 |
| Alpha | 0.06 |
| Beta | 0.707 |
| Annual Standard Deviation | 0.186 |
| Annual Variance | 0.034 |
| Information Ratio | 0.266 |
| Tracking Error | 0.154 |
| Treynor Ratio | 0.15 |
| Total Fees | $156470.97 |
| Estimated Strategy Capacity | $110000000.00 |
| Lowest Capacity Asset | SHV TP8J6Z7L419H |
| Portfolio Turnover | 71.52% |
##########################################################################
# Reweighted Price Relative Tracking (RPRT) Strategy
# ---------------------------------------------
# Reference: "Reweighted Price Relative Tracking System for Automatic
# Portfolio Optimization" by Zhao-Rong Lai, et al. (IEEE TSMC:Systems, 2018).
# This implementation focuses on the "broad_etfs" variant.
#
# Strategy Overview:
# ------
# Tracks and reweights asset price relatives using a moving window approach
# Dynamically adjusts portfolio weights based on asset performance trends
# Projects portfolio weights onto probability simplex for valid allocations
#
# Parameters:
# -----
# window_size: Lookback window for SMA of price relatives
# theta: Mixing parameter for reweighted price relative
# eps: Reversion threshold
# ................................................................
# Inspired by / Ported from:
# Original research paper (Lai et al.): https://ieeexplore.ieee.org/document/8411138/
##########################################################################
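# For reference, a comment-level sketch of the update rule implemented in
# OnData below (per asset i; x_t = daily price relative, phi_t = reweighted
# price relative, b_t = portfolio weights, x_sma = windowed mean of x_t):
#
#   gamma_i    = theta * x_t,i / (theta * x_t,i + phi_t,i)
#   phi_t+1,i  = gamma_i + (1 - gamma_i) * phi_t,i / x_t,i
#   lambda     = max(0, (eps - b_t . phi_t+1) / ||phi_t+1 - mean(phi_t+1)||^2)
#   b_t+1      = Proj_simplex(b_t + lambda * (x_sma * (phi_t+1 - mean(phi_t+1))))
#
# See the paper for the exact derivation and notation; the code adds small
# numerical safeguards (fallbacks and a cap on lambda) around these formulas.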
# region imports
from AlgorithmImports import *
import numpy as np
from collections import deque
import math
# endregion
class RPRTAlgorithm(QCAlgorithm):
"""
QuantConnect implementation of the Reweighted Price Relative Tracking (RPRT) strategy.
Reference: "Reweighted Price Relative Tracking System for Automatic Portfolio Optimization" by Zhao-Rong Lai, et al. (IEEE TSMC:Systems, 2018).
This implementation focuses on the "broad_etfs" variant identified in the analysis.
Inspired by / Ported from:
- Original research paper (Lai et al.): https://ieeexplore.ieee.org/document/8411138/
"""
def Initialize(self):
"""
Initialise the data and resolution required, along with strategy parameters.
"""
self.SetStartDate(2007, 1, 1) # Set start date (adjust as needed)
self.SetEndDate(2025, 1, 1)
self.SetCash(100000) # Set strategy cash
self.SetBenchmark("SPY")
# === Strategy Parameters ===
# Using optimized parameters for "broad_etfs" from the analysis [cite: 51]
self.window_size = 10 # Lookback window for SMA of price relatives [cite: 51]
self.theta = 0.8 # Mixing parameter for reweighted price relative [cite: 51]
self.eps = 5.0 # Reversion threshold [cite: 51]
# === Universe Definition ===
# Broad ETFs variant tickers [cite: 119]
self.tickers = ["SPY", "QQQ", "IWM", "EFA", "EEM", "TLT", "SHV"]
self.symbols = []
for ticker in self.tickers:
symbol = self.AddEquity(ticker, Resolution.Daily).Symbol
self.symbols.append(symbol)
# === State Variables ===
self.n_assets = len(self.symbols)
# Dictionary to hold current target portfolio weights {Symbol: weight}
self.portfolio_weights = {symbol: 1.0 / self.n_assets for symbol in self.symbols}
# Dictionary to hold previous reweighted price relatives {Symbol: phi_prev}
self.phi_prev = {symbol: 1.0 for symbol in self.symbols}
# Dictionary to hold rolling buffer of daily price relatives {Symbol: deque(maxlen=window_size)}
self.price_relatives_buffer = {symbol: deque(maxlen=self.window_size) for symbol in self.symbols}
# Dictionary to hold the last known closing price {Symbol: price}
self.last_close = {}
# Flag to track if initial portfolio has been set
self.initial_portfolio_set = False
# === Warm-up Period ===
# Required to populate the rolling window buffer
self.SetWarmUp(self.window_size)
self.Debug(f"Initialization complete. Universe: {self.tickers}, Window Size: {self.window_size}, Theta: {self.theta}, Epsilon: {self.eps}")
def simplex_proj_euclidean(self, v):
"""
Project vector v onto the probability simplex { x >= 0, sum(x) = 1 }
using Euclidean projection. Ensures portfolio weights are valid.
Reference: Duchi et al. (2008). Efficient Projections onto the l1-Ball for Learning in High Dimensions
Code adapted from the provided notebook [cite: 99].
Args:
v (np.array): Input vector (potentially unconstrained weights).
Returns:
np.array: Projected vector (valid portfolio weights).
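Example (rounded): v = [0.5, 0.7, -0.2] projects to [0.4, 0.6, 0.0],
which is non-negative and sums to 1.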
"""
v = np.array(v, dtype=float)
n = len(v)
# Check if already on simplex
if np.sum(v) == 1.0 and np.all(v >= 0):
return v
# Sort in descending order
u = np.sort(v)[::-1]
cumsum_u = np.cumsum(u)
rho = -1
for i in range(n):
if u[i] + (1.0 - cumsum_u[i]) / (i + 1) > 0:
rho = i
# If rho not found (should not happen with valid inputs but handle robustly)
if rho == -1:
self.Log(f"Warning: Simplex projection fallback triggered for input: {v}")
return np.ones(n) / n
# Calculate the shift value (the projection's own 'theta'; unrelated to self.theta, the mixing parameter)
theta = (1.0 - cumsum_u[rho]) / (rho + 1)
# Compute projection
w = np.maximum(v + theta, 0)
# Re-normalize to handle potential floating point inaccuracies
s = w.sum()
if s > 1e-12: # Check if sum is significantly non-zero
w = w / s
else:
# Fallback if sum is near zero (e.g., all inputs were negative)
self.Log(f"Warning: Simplex projection resulted in near-zero sum for input: {v}. Using uniform weights.")
w = np.ones(n) / n
return w
def OnData(self, data):
"""
Event handler called each time the AlgorithmManager delivers a new Slice of data.
"""
# Wait for warm-up period to complete
if self.IsWarmingUp:
return
# --- Data Check ---
# Check if data for all symbols is present
missing_data = False
current_closes = {}
for symbol in self.symbols:
if not data.ContainsKey(symbol) or data[symbol] is None or data[symbol].Price == 0:
missing_data = True
self.Log(f"Warning: Missing or zero price data for {symbol} on {self.Time.date()}")
break
current_closes[symbol] = data[symbol].Close
# If data is missing for any symbol, hold the current portfolio weights and do nothing
if missing_data:
self.Log(f"Holding previous weights due to missing data on {self.Time.date()}")
return
# --- Initialization on First Data Point ---
if not self.last_close:
# First data point after warm-up
self.last_close = current_closes
# Initialize phi_prev to 1.0 for all assets [cite: 110] (implied from notebook logic)
self.phi_prev = {symbol: 1.0 for symbol in self.symbols}
# Initialize portfolio weights equally [cite: 105] (implicit in notebook init)
initial_weight = 1.0 / self.n_assets
self.portfolio_weights = {symbol: initial_weight for symbol in self.symbols}
self.Log(f"First data point after warm-up on {self.Time.date()}. Initializing last close prices, phi_prev, and setting initial equal weights.")
self.SetHoldings([PortfolioTarget(symbol, initial_weight) for symbol in self.symbols])
self.initial_portfolio_set = True
return # Return after setting initial state
# Ensure the initial portfolio was set (should always be true after first data point)
if not self.initial_portfolio_set:
self.Error("Initial portfolio weights were not set. This should not happen.")
return
# --- Calculate Price Relatives (x_t) ---
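# x_t,i = close_t,i / close_{t-1},i (today's close over the previous close) for each asset i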
price_relatives = {}
for symbol in self.symbols:
# Avoid division by zero if last close was somehow zero
if self.last_close[symbol] == 0:
self.Error(f"Zero last close price encountered for {symbol} on {self.Time.date()}. Cannot calculate relative.")
return # Cannot proceed with this step
price_relatives[symbol] = current_closes[symbol] / self.last_close[symbol]
# Update last close prices for the next iteration
self.last_close = current_closes
# --- Update Rolling Buffers ---
for symbol in self.symbols:
self.price_relatives_buffer[symbol].append(price_relatives[symbol])
# Check if buffers are full (should be after warm-up)
if len(self.price_relatives_buffer[self.symbols[0]]) < self.window_size:
self.Log(f"Buffers not yet full on {self.Time.date()}. Length: {len(self.price_relatives_buffer[self.symbols[0]])}")
# Should not happen after warm-up, but good failsafe
return
# --- Calculate SMA of Price Relatives (x_sma) ---
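# x_sma,i = mean of the last window_size daily price relatives; used below to scale the weight-update direction per asset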
sma_relatives = {}
for symbol in self.symbols:
sma_relatives[symbol] = sum(self.price_relatives_buffer[symbol]) / self.window_size
# --- Calculate Next Reweighted Price Relative (phi_next) --- [cite: 113]
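# Per asset i: gamma_i = theta * x_t,i / (theta * x_t,i + phi_prev,i)
#              phi_next,i = gamma_i + (1 - gamma_i) * phi_prev,i / x_t,i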
phi_next = {}
for symbol in self.symbols:
x_t_i = price_relatives[symbol]
phi_prev_i = self.phi_prev[symbol]
denominator = self.theta * x_t_i + phi_prev_i
if abs(denominator) < 1e-12:
# Fallback for numerical stability [cite: 113]
self.Log(f"Warning: Near-zero denominator in phi_next calculation for {symbol} on {self.Time.date()}. Setting phi_next to 1.0.")
phi_next[symbol] = 1.0
else:
gamma_i = (self.theta * x_t_i) / denominator
if abs(x_t_i) < 1e-12:
# Avoid division by zero in ratio [cite: 114]
self.Log(f"Warning: Near-zero price relative x_t for {symbol} on {self.Time.date()}. Using large ratio fallback.")
ratio = 1e6 # Artificially large as per notebook [cite: 114]
else:
ratio = phi_prev_i / x_t_i
phi_next[symbol] = gamma_i + (1.0 - gamma_i) * ratio
# --- Calculate Step Size (lambda) --- [cite: 116]
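# lambda = max(0, (eps - b . phi_next) / ||phi_next - mean(phi_next)||^2); stays 0 when the predicted return already meets the eps threshold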
# Convert dictionaries to numpy arrays for vector math, maintaining symbol order
phi_next_vec = np.array([phi_next[s] for s in self.symbols])
portfolio_weights_vec = np.array([self.portfolio_weights[s] for s in self.symbols])
# Mean of predicted relatives [cite: 115]
phi_bar = phi_next_vec.mean()
# Difference vector
diff_phi = phi_next_vec - phi_bar
# Norm of difference vector [cite: 115]
norm_phi_sq = np.linalg.norm(diff_phi)**2 # Use squared norm as per paper/notebook [cite: 116]
# Predicted return using current portfolio weights [cite: 116]
pred_ret = np.dot(portfolio_weights_vec, phi_next_vec)
gap = self.eps - pred_ret
lam = 0.0 # Default lambda is 0
if gap > 0 and norm_phi_sq > 1e-12: # Only adjust if predicted return is below eps and norm is non-zero [cite: 116]
lam = gap / norm_phi_sq
# Ensure lambda is not excessively large (add safety)
lam = min(lam, 1e6) # Prevent overly large steps if norm_phi_sq is tiny
# --- Calculate New Portfolio Weights (b_new) ---
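# b_new = Proj_simplex(b + lambda * (x_sma * (phi_next - mean(phi_next))))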
sma_relatives_vec = np.array([sma_relatives[s] for s in self.symbols])
# Element-wise multiplication for the step vector component [cite: 116]
step_vec_component = sma_relatives_vec * diff_phi
# Calculate temporary unconstrained weights [cite: 116]
b_temp = portfolio_weights_vec + lam * step_vec_component
# Project temporary weights onto the probability simplex [cite: 116]
b_new_vec = self.simplex_proj_euclidean(b_temp)
# --- Update State and Set Portfolio Targets ---
# Convert projected weights back to dictionary
new_weights = {symbol: weight for symbol, weight in zip(self.symbols, b_new_vec)}
# Update internal state for the next iteration
self.portfolio_weights = new_weights
self.phi_prev = phi_next # Use the calculated phi_next as phi_prev for the next step [cite: 117]
# --- Logging ---
if self.Time.minute == 0 and self.Time.hour == 0:  # Log once per day
self.Debug(f"--- Daily Update {self.Time.date()} ---")
# self.Debug(f" Price Relatives (x_t): { {s.Value: round(price_relatives[s], 4) for s in self.symbols} }")
# self.Debug(f" SMA Relatives (x_sma): { {s.Value: round(sma_relatives[s], 4) for s in self.symbols} }")
# self.Debug(f" Reweighted Relatives (phi_next): { {s.Value: round(phi_next[s], 4) for s in self.symbols} }")
self.Debug(f" Predicted Return: {pred_ret:.4f}, Epsilon: {self.eps}, Gap: {gap:.4f}")
self.Debug(f" Phi Norm Sq: {norm_phi_sq:.4f}, Lambda: {lam:.4f}")
self.Debug(f" Target Weights (b_new): { {s.Value: round(new_weights[s], 4) for s in self.symbols} }")
self.Debug(f" Sum of Target Weights: {sum(new_weights.values()):.4f}") # Should be very close to 1.0
# --- Execute Rebalancing ---
# Generate portfolio targets from the new weights
targets = [PortfolioTarget(symbol, weight) for symbol, weight in self.portfolio_weights.items()]
# Use SetHoldings to rebalance the portfolio to the target weights
# This handles calculating order sizes and placing trades
self.SetHoldings(targets)