Overall Statistics

Total Orders: 2753
Average Win: 0.57%
Average Loss: -0.39%
Compounding Annual Return: 25.397%
Drawdown: 22.200%
Expectancy: 0.427
Start Equity: 100000
End Equity: 984651.68
Net Profit: 884.652%
Sharpe Ratio: 0.86
Sortino Ratio: 1
Probabilistic Sharpe Ratio: 36.699%
Loss Rate: 42%
Win Rate: 58%
Profit-Loss Ratio: 1.45
Alpha: 0.1
Beta: 0.852
Annual Standard Deviation: 0.191
Annual Variance: 0.037
Information Ratio: 0.617
Tracking Error: 0.144
Treynor Ratio: 0.193
Total Fees: $10452.54
Estimated Strategy Capacity: $30000000.00
Lowest Capacity Asset: REM TSBSPJJNRAED
Portfolio Turnover: 7.56%
Drawdown Recovery: 275
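
Consistency note: the reported Expectancy agrees with the standard identity
Expectancy = WinRate * ProfitLossRatio - LossRate = 0.58 * 1.45 - 0.42 = 0.421,
i.e. it matches the reported 0.427 up to rounding of the displayed win rate and
profit-loss ratio.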
"""Helpers package for QuantConnect Web IDE.
Re-export wrapper-friendly names so the main file can import thin delegates.
"""

from .analytics import (
    print_monthly_metrics as _helpers_print_monthly_metrics,
    print_sub_portfolio_metrics as _helpers_print_sub_portfolio_metrics,
    print_sub_portfolio_audit as _helpers_print_sub_portfolio_audit,
    print_positions as _helpers_print_positions,
    update_sub_portfolio_navs as _helpers_update_sub_portfolio_navs,
)
from .ranking import (
    get_rank as _helpers_get_rank,
    update_ranking_snapshot as _helpers_update_ranking_snapshot,
)
from .data_sources import prewarm_minute_data as _helpers_prewarm_minute_data
from .rebalance import rebalance_moc as _helpers_rebalance
from .fred import Fred as _helpers_Fred
from .costs import apply_margin_and_borrow_costs as _helpers_apply_margin_and_borrow_costs
from .performance import (
    configure_perf_reporting as _helpers_perf_configure,
    execution_snapshot as _helpers_perf_execution_snapshot,
    on_order_event as _helpers_perf_on_order_event,
    eom_snapshot as _helpers_perf_eom_snapshot,
    print_end_of_algorithm_perf as _helpers_perf_on_end_of_algorithm,
    export_perf_csv as _helpers_perf_export_csv,
)
from .config import (
    EQUITY_LIST,
    BOND_LIST,
    FRED_DEFAULT_SERIES,
    LOG_OVERRIDES,
    add_equity_list,
    add_core_instruments,
    configure_universe,
    configure_logging_flags,
)

__all__ = [
    '_helpers_print_monthly_metrics',
    '_helpers_print_sub_portfolio_metrics',
    '_helpers_print_sub_portfolio_audit',
    '_helpers_print_positions',
    '_helpers_update_sub_portfolio_navs',
    '_helpers_get_rank',
    '_helpers_update_ranking_snapshot',
    '_helpers_prewarm_minute_data',
    '_helpers_rebalance',
    '_helpers_Fred',
    '_helpers_apply_margin_and_borrow_costs',
    '_helpers_perf_configure',
    '_helpers_perf_execution_snapshot',
    '_helpers_perf_on_order_event',
    '_helpers_perf_eom_snapshot',
    '_helpers_perf_on_end_of_algorithm',
    '_helpers_perf_export_csv',
    'EQUITY_LIST',
    'BOND_LIST',
    'FRED_DEFAULT_SERIES',
    'LOG_OVERRIDES',
    'add_equity_list',
    'add_core_instruments',
    'configure_universe',
    'configure_logging_flags',
]
"""
Analytics helpers: NAV, MaxDD, Sharpe, monthly metrics, CSV writer
Copy into QC Web IDE and adapt to your fields; call from your main algorithm
like: from helpers.analytics import print_monthly_metrics
"""

from math import sqrt
import math
from typing import Any, List
import os

# Import Lean types when running in Web IDE; ignore locally
try:
    from AlgorithmImports import *  # type: ignore
except Exception:
    # Minimal fallbacks for local linting
    class Resolution:  # type: ignore
        Daily = 0
import numpy as np


def print_sub_portfolio_metrics(algo: Any) -> None:
    """Log per-sub-portfolio metrics (CAGR, MaxDD, Alpha vs benchmark, Beta).
    Extracted from Main.print_sub_portfolio_metrics to reduce main.py size.
    """
    if not getattr(algo, 'sub_port_metrics', False) or not hasattr(algo, 'sub_portfolios'):
        return
    try:
        import pandas as pd  # type: ignore
    except Exception:
        pd = None
    date_str = algo.Time.strftime("%Y-%m-%d")
    bench = getattr(algo, 'BENCHMARK_SYMBOL', None)
    rf_annual = float(getattr(algo, 'fedfunds_rate', 0.0))  # e.g., 0.05 for 5%
    rf_daily = rf_annual / 252.0
    for period in getattr(algo, 'LONG_PERIODS', []):
        nav_hist = getattr(algo, 'sub_portfolio_nav_history', {}).get(period, [])
        time_hist = getattr(algo, 'sub_portfolio_nav_timeline', {}).get(period, [])
        if not nav_hist or len(nav_hist) < 30:
            continue
        try:
            start_nav = float(nav_hist[0])
            end_nav = float(nav_hist[-1])
            # Duration in years
            if time_hist and len(time_hist) == len(nav_hist):
                years = max((time_hist[-1] - time_hist[0]).days, 1) / 365.25
            else:
                years = max((len(nav_hist) - 1), 1) / 252.0
            cagr = (end_nav / start_nav) ** (1.0 / years) - 1.0 if start_nav > 0 and years > 0 else float('nan')
            # Max drawdown from NAV series
            peak = -float('inf')
            max_dd = 0.0
            for v in nav_hist:
                v = float(v)
                if v > peak:
                    peak = v
                if peak > 0:
                    dd = (v - peak) / peak
                    if dd < max_dd:
                        max_dd = dd
            max_dd_pct = max_dd * 100.0  # negative number
            # Alpha vs benchmark via daily regression of excess returns
            # Sub daily returns from NAV
            sub_rets = []
            for i in range(1, len(nav_hist)):
                prev = float(nav_hist[i-1])
                cur = float(nav_hist[i])
                if prev > 0:
                    sub_rets.append((cur - prev) / prev)
            alpha_ann = float('nan')
            beta = float('nan')
            bench_tkr = bench.Value if hasattr(bench, 'Value') else (str(bench) if bench else None)
            if bench and pd is not None and len(sub_rets) >= 30:
                try:
                    start_dt = time_hist[0] if time_hist else algo.StartDate
                    hist = algo.History([bench], start_dt, algo.Time, Resolution.Daily)
                    if hist is not None and not hist.empty:
                        try:
                            s = hist.loc[bench]
                        except Exception:
                            s = hist
                        close = s['close'] if 'close' in getattr(s, 'columns', []) else getattr(s, 'close', None)
                        if close is not None:
                            bench_series = close.dropna()
                            bench_rets = bench_series.pct_change().dropna()
                            # Align by DATE (not timestamp) to avoid timezone/session mismatches
                            if time_hist and pd is not None:
                                # Sub returns indexed by date
                                sub_dates = []
                                for dt in time_hist[1:1+len(sub_rets)]:
                                    try:
                                        # Normalize to naive dt and take date
                                        d0 = pd.Timestamp(dt)
                                        if d0.tzinfo is not None:
                                            d0 = d0.tz_convert(algo.TimeZone).tz_localize(None)
                                        d0 = d0.date()
                                    except Exception:
                                        try:
                                            d0 = pd.Timestamp(dt).date()
                                        except Exception:
                                            d0 = None
                                    if d0 is not None:
                                        sub_dates.append(d0)
                                sub_series = pd.Series(sub_rets, index=pd.Index(sub_dates, name='date'), name='rp')

                                # Benchmark returns indexed by date
                                b_idx = pd.to_datetime(getattr(bench_rets, 'index', []))
                                try:
                                    # Convert to algorithm tz, then drop tz and keep date
                                    b_dates = []
                                    for t in b_idx:
                                        tt = pd.Timestamp(t)
                                        if tt.tzinfo is not None:
                                            tt = tt.tz_convert(algo.TimeZone).tz_localize(None)
                                        b_dates.append(tt.date())
                                    bench_df = pd.DataFrame({'rm': bench_rets.values}, index=pd.Index(b_dates, name='date'))
                                except Exception:
                                    bench_df = pd.DataFrame({'rm': bench_rets.values}, index=pd.Index([pd.Timestamp(t).date() for t in b_idx], name='date'))
                                # Collapse duplicates by keeping the last of the day
                                bench_df = bench_df.groupby(level=0).last()

                                joined = sub_series.to_frame().join(bench_df, how='inner').dropna()
                                if len(joined) >= 30:
                                    y = joined['rp'].values - rf_daily
                                    x = joined['rm'].values - rf_daily
                                    if len(x) == len(y) and len(x) >= 2:
                                        # Simple OLS via numpy.polyfit
                                        b1, b0 = np.polyfit(x, y, 1)  # slope, intercept
                                        beta = float(b1)
                                        alpha_daily = float(b0)
                                        alpha_ann = alpha_daily * 252.0
                                else:
                                    try:
                                        algo.Debug(f"[metrics] Insufficient overlap for alpha/beta on SP-{period}: {len(joined)} days")
                                    except Exception:
                                        pass
                except Exception:
                    pass
            # Log metrics
            cagr_pct = cagr * 100.0 if not np.isnan(cagr) else float('nan')
            alpha_pct = alpha_ann * 100.0 if not np.isnan(alpha_ann) else float('nan')
            bench_label = bench_tkr or 'Benchmark'
            algo.Log(f"=== Sub-Portfolio {period} Metrics ===")
            algo.Log(f"{date_str}, CAGR: {cagr_pct:.2f}%, MaxDD: {max_dd_pct:.2f}%, Alpha({bench_label}): {alpha_pct:.2f}% ann., Beta: {beta:.2f}")
        except Exception:
            # Skip this sub on any calculation error
            continue
    try:
        algo.Debug("")
    except Exception:
        pass
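
# A minimal, self-contained sketch of the alpha/beta estimation performed above,
# assuming synthetic inputs in place of the date-aligned NAV and benchmark
# returns (the helper name is illustrative, not part of the Lean API).
def estimate_alpha_beta(sub_rets: List[float], bench_rets: List[float],
                        rf_daily: float = 0.0, periods_per_year: float = 252.0):
    """OLS of daily excess portfolio returns on daily excess benchmark returns.

    Returns (annualized_alpha, beta), mirroring the numpy.polyfit call above.
    """
    y = np.asarray(sub_rets, dtype=float) - rf_daily
    x = np.asarray(bench_rets, dtype=float) - rf_daily
    slope, intercept = np.polyfit(x, y, 1)  # slope = beta, intercept = daily alpha
    return intercept * periods_per_year, float(slope)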


def print_sub_portfolio_audit(algo: Any) -> None:
    """Detailed sub-portfolio audit print.
    Mirrors Main.print_sub_portfolio_audit but callable from helpers to keep main.py slim.
    """
    if not getattr(algo, 'sub_port_audit', False) or not hasattr(algo, 'sub_portfolios'):
        return
    now = getattr(algo, 'Time', None)
    date_str = now.strftime("%Y-%m-%d") if now else ""
    try:
        total_nav = float(sum(getattr(algo, 'sub_portfolio_nav', {}).values())) if hasattr(algo, 'sub_portfolio_nav') else float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0))
    except Exception:
        total_nav = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0))
    if total_nav <= 0:
        return
    # Precompute sub weight vs total
    sub_vs_total = {}
    for p in getattr(algo, 'sub_portfolios', {}).keys():
        try:
            sub_vs_total[p] = (float(getattr(algo, 'sub_portfolio_nav', {}).get(p, 0.0)) / total_nav) if hasattr(algo, 'sub_portfolio_nav') else 0.0
        except Exception:
            sub_vs_total[p] = 0.0
    # Build per-symbol actual total percent-of-total
    actual_total_pct = {}
    pv = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0)) or total_nav
    try:
        for sym in list(getattr(algo.Portfolio, 'Keys', [])):
            h = algo.Portfolio[sym]
            val = float(h.HoldingsValue)
            if pv > 0 and val != 0:
                pct = 100.0 * (val / pv)
                actual_total_pct[sym] = -abs(pct) if h.Quantity < 0 else abs(pct)
    except Exception:
        pass
    # Build per-symbol per-sub target shares (as percent of total), and totals per symbol
    per_sub_weights = getattr(algo, 'sub_portfolio_weights', {}) if hasattr(algo, 'sub_portfolio_weights') else {}
    symbol_total_target_share = {}
    symbol_sub_share = {}
    for p, wmap in per_sub_weights.items():
        for sym, w in wmap.items():
            share = float(w) * float(sub_vs_total.get(p, 0.0))
            symbol_total_target_share[sym] = symbol_total_target_share.get(sym, 0.0) + share
            if sym not in symbol_sub_share:
                symbol_sub_share[sym] = {}
            symbol_sub_share[sym][p] = share
    # Now print per sub
    for period, sub in getattr(algo, 'sub_portfolios', {}).items():
        try:
            sub_nav = float(getattr(algo, 'sub_portfolio_nav', {}).get(period, 0.0)) if hasattr(algo, 'sub_portfolio_nav') else 0.0
        except Exception:
            sub_nav = 0.0
        if sub_nav <= 0:
            continue
        sub_total_pct = 100.0 * float(sub_vs_total.get(period, 0.0))
        algo.Log(f"=== Sub-Portfolio {period} Audit ===")
        algo.Log(f"{date_str}, SubNAV={sub_nav:.2f}, Sub/Total={sub_total_pct:.2f}%")
        sub_weights = per_sub_weights.get(period, {})
        if sub_weights:
            # Sort by target descending within sub
            for sym, w in sorted(sub_weights.items(), key=lambda x: (-x[1], x[0].Value if hasattr(x[0], 'Value') else str(x[0]))):
                target_pct_of_sub = 100.0 * float(w)
                # Partition actual across subs proportional to target shares
                act_tot = actual_total_pct.get(sym, None)
                if act_tot is not None:
                    total_share = symbol_total_target_share.get(sym, 0.0)
                    sub_share = symbol_sub_share.get(sym, {}).get(period, 0.0)
                    if total_share > 0 and sub_vs_total.get(period, 0.0) > 0:
                        actual_pct_of_sub = (act_tot * (sub_share / total_share)) / sub_vs_total[period]
                    else:
                        actual_pct_of_sub = 0.0
                    if sym in getattr(algo, 'Portfolio', {}) and algo.Portfolio[sym].Quantity < 0:
                        actual_pct_of_sub = -abs(actual_pct_of_sub)
                else:
                    actual_pct_of_sub = 0.0
                algo.Log(f"{date_str}, {sym.Value if hasattr(sym,'Value') else str(sym)}, Actual: {actual_pct_of_sub:.2f}%, Target: {target_pct_of_sub:.2f}%")
        else:
            # Fallback: no targets recorded for this sub — list held symbols in this sub’s sets with actual as percent of sub NAV
            tops = sub.get('topranked', []) or []
            bots = sub.get('bottomranked', []) or []
            held = [s for s in list(set(tops + bots)) if s in getattr(algo, 'Portfolio', {}) and algo.Portfolio[s].Quantity != 0]
            for s in held:
                h = algo.Portfolio[s]
                val = float(h.HoldingsValue)
                pct = 100.0 * (val / sub_nav) if sub_nav else 0.0
                if h.Quantity < 0:
                    pct = -abs(pct)
                algo.Log(f"{date_str}, {s.Value if hasattr(s,'Value') else str(s)}, Actual: {pct:.2f}%")

def compute_nav(algo: Any) -> float:
    # Portfolio value over starting capital
    try:
        pv = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0))
        sc = float(getattr(algo, 'StartingCapital', 0.0) or 1.0)
        return pv / sc
    except Exception:
        return float('nan')


def compute_max_drawdown(equity_curve: List[float]) -> float:
    max_dd = 0.0
    peak = float('-inf')
    for v in equity_curve:
        peak = max(peak, v)
        if peak > 0:
            dd = (peak - v) / peak
            if dd > max_dd:
                max_dd = dd
    return max_dd


def compute_sharpe(daily_returns: List[float], risk_free_rate_annual: float = 0.0) -> float:
    if not daily_returns:
        return float('nan')
    mean = sum(daily_returns) / len(daily_returns)
    var = sum((r - mean) ** 2 for r in daily_returns) / max(1, (len(daily_returns) - 1))
    std = sqrt(var)
    if std == 0:
        return float('nan')
    # Convert to annualized assuming ~252 trading days
    return (mean - risk_free_rate_annual / 252.0) / std * sqrt(252.0)
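
# Illustrative call with made-up daily returns (NaN signals degenerate input:
# an empty list or a zero-variance series); the result is the mean daily excess
# return over the daily stdev, scaled by sqrt(252):
#     compute_sharpe([0.001, 0.003, -0.001, 0.002, -0.002], risk_free_rate_annual=0.05)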


def _max_drawdown(values: List[float]) -> float:
    peak = None
    max_dd = 0.0
    for v in values:
        if peak is None or v > peak:
            peak = v
        if peak and peak > 0:
            dd = (peak - v) / peak
            if dd > max_dd:
                max_dd = dd
    return max_dd


def _sharpe_ratio(returns: List[float], periods_per_year: int = 252, risk_free: float = 0.0) -> float:
    if not returns:
        return 0.0
    if len(returns) < 2:
        return 0.0
    mean = sum(returns) / len(returns)
    var = sum((r - mean) ** 2 for r in returns) / (len(returns) - 1)
    std = var ** 0.5
    if std == 0:
        return 0.0
    # Excess daily returns over RF
    mean_excess = mean - risk_free / periods_per_year
    return (mean_excess / std) * (periods_per_year ** 0.5)


def _write_metrics_csv(algo: Any, rows: List[List[str]]) -> None:
    if not getattr(algo, 'WRITE_ANALYTICS_CSV', False):
        return
    try:
        root = os.path.dirname(os.path.abspath(__file__)) if '__file__' in globals() else ''
        out_dir = os.path.join(root, 'analytics') if root else 'analytics'
        if not os.path.exists(out_dir):
            os.makedirs(out_dir, exist_ok=True)
        out_path = os.path.join(out_dir, 'metrics.csv')
        write_header = not os.path.exists(out_path)
        with open(out_path, 'a', encoding='utf-8', newline='') as f:
            if write_header:
                f.write('date,sub_port,long_period,nav,net_profit,profit_pct,max_drawdown_pct,sharpe\n')
            for r in rows:
                f.write(','.join(str(x) for x in r) + '\n')
    except Exception as e:
        try:
            algo.Debug(f"[metrics.csv] write failed: {e}")
        except Exception:
            pass


# --- Position reporting and NAV updater moved out of main.py to keep it under QC file size cap ---
def print_positions(algo: Any) -> None:
    """Month-end position report.
    Prints totals (PV, Cash $ and %) and, when not abbreviated, per‑holding lines with Qty, $ Value,
    Actual %, optional Target %, and integer ordinal rnk (1 = best) from the latest ranking snapshot.
    """
    try:
        if not (
            getattr(algo, 'POSITION_REPORTING_FLAG', False)
            or getattr(algo, 'abbreviated_positions_print', False)
            or getattr(algo, 'sub_port_positions', False)
        ):
            return
    except Exception:
        return
    try:
        total_value = getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0) or 1.0
    except Exception:
        total_value = 1.0
    try:
        cash_value = float(getattr(algo.Portfolio, 'Cash', 0.0) or 0.0)
    except Exception:
        cash_value = 0.0
    cash_pct = (100.0 * cash_value / total_value) if total_value else 0.0
    now = getattr(algo, 'Time', None)
    date_str = now.strftime("%Y-%m-%d") if now else ""

    # Build integer ordinal ranking (1..N) map from latest snapshot, keep best across subs
    rnk_ord_map = {}
    rnk_ord_by_value = {}
    try:
        last_rank = getattr(algo, 'last_ranking', {}) if hasattr(algo, 'last_ranking') else {}
        for _p, pdata in getattr(last_rank, 'items', lambda: [])():
            ranks = pdata.get('ranks', {}) or {}
            ord_list = []
            for sym, rd in getattr(ranks, 'items', lambda: [])():
                try:
                    sc = float(rd.get('final_score', float('nan')))
                except Exception:
                    sc = float('nan')
                if sc == sc:
                    ord_list.append((sc, sym))
            ord_list.sort(key=lambda t: t[0])
            for idx, (_sc, sym) in enumerate(ord_list, start=1):
                prev = rnk_ord_map.get(sym, float('inf'))
                if idx < prev:
                    rnk_ord_map[sym] = idx
                    rnk_ord_by_value[getattr(sym, 'Value', str(sym))] = idx
    except Exception:
        rnk_ord_map = {}
        rnk_ord_by_value = {}

    # High-level counts
    if getattr(algo, 'POSITION_REPORTING_FLAG', False):
        long_positions = 0
        short_positions = 0
        try:
            for symbol in getattr(algo.Portfolio, 'Keys', []):
                q = algo.Portfolio[symbol].Quantity
                if q > 0:
                    long_positions += 1
                elif q < 0:
                    short_positions += 1
        except Exception:
            pass
        try:
            algo.Log(f"{algo.Time:%m/%d/%Y}\t{algo.Time.hour}\t0\t0\tLong\tPositions\t{long_positions}")
            algo.Log(f"{algo.Time:%m/%d/%Y}\t{algo.Time.hour}\t0\t0\tShort\tPositions\t{short_positions}")
        except Exception:
            pass
        if not getattr(algo, 'abbreviated_positions_print', False):
            try:
                # Optional regime status header (captured at T-60 in prewarm)
                try:
                    regime = getattr(algo, 'regime', None)
                    ratio = getattr(algo, 'regime_ratio', None)
                    ratio_sma = getattr(algo, 'regime_ratio_sma', None)
                    eq_last = getattr(algo, 'regime_eq', None)
                    fi_last = getattr(algo, 'regime_fi', None)
                    if regime is not None:
                        def _fmt(x, nd=6):
                            try:
                                xv = float(x)
                                if xv != xv:  # NaN check
                                    return "nan"
                                return f"{xv:.{nd}f}"
                            except Exception:
                                return "nan"
                        algo.Log(f"Regime Status: regime={regime}, ratio={_fmt(ratio)}, sma={_fmt(ratio_sma)}, eq={_fmt(eq_last, nd=2)}, fi={_fmt(fi_last, nd=2)}")
                except Exception:
                    pass
                algo.Log("=== Position Report (Totals) ===")
                algo.Log(f"{date_str}, PortfolioValue=${total_value:,.2f}, Cash=${cash_value:,.2f} ({cash_pct:.2f}% of PV)")
                algo.Log("=== Position Report (Holdings) ===")
            except Exception:
                pass
            # Build and sort items by abs weight
            items = []
            try:
                for sym in list(getattr(algo.Portfolio, 'Keys', [])):
                    h = algo.Portfolio[sym]
                    if h.Quantity == 0:
                        continue
                    val = float(h.HoldingsValue)
                    pct = 100.0 * (val / total_value) if total_value else 0.0
                    pct = (-abs(pct)) if h.Quantity < 0 else abs(pct)
                    rnk_int = rnk_ord_map.get(sym, rnk_ord_by_value.get(getattr(sym, 'Value', str(sym)), None))
                    items.append((sym, h, val, pct, rnk_int))
            except Exception:
                items = []
            items.sort(key=lambda x: -abs(x[3]))
            tgt_map = getattr(algo, 'last_aggregated_targets', {}) if hasattr(algo, 'last_aggregated_targets') else {}
            for sym, h, val, pct, rnk_int in items:
                target_pct = None
                try:
                    if sym in tgt_map:
                        target_pct = 100.0 * float(tgt_map[sym])
                except Exception:
                    target_pct = None
                if target_pct is not None and rnk_int is not None:
                    algo.Log(f"{date_str}, {sym.Value}, Qty={h.Quantity}, Value=${val:,.2f}, Actual={pct:.2f}%, Target={target_pct:.2f}%, rnk={int(rnk_int)}")
                elif target_pct is not None:
                    algo.Log(f"{date_str}, {sym.Value}, Qty={h.Quantity}, Value=${val:,.2f}, Actual={pct:.2f}%, Target={target_pct:.2f}%")
                elif rnk_int is not None:
                    algo.Log(f"{date_str}, {sym.Value}, Qty={h.Quantity}, Value=${val:,.2f}, Actual={pct:.2f}%, rnk={int(rnk_int)}")
                else:
                    algo.Log(f"{date_str}, {sym.Value}, Qty={h.Quantity}, Value=${val:,.2f}, Actual={pct:.2f}%")

    # Abbreviated per-symbol view
    if getattr(algo, 'abbreviated_positions_print', False):
        for symbol in getattr(algo.Portfolio, 'Keys', []):
            try:
                holding = algo.Portfolio[symbol]
                if holding.Quantity == 0:
                    continue
                pct = 100.0 * (holding.HoldingsValue / total_value)
                pct = -abs(pct) if holding.Quantity < 0 else abs(pct)
                target_pct = None
                try:
                    tgt_map = getattr(algo, 'last_aggregated_targets', {})
                    if symbol in tgt_map:
                        target_pct = 100.0 * float(tgt_map[symbol])
                except Exception:
                    target_pct = None
                rnk_int = rnk_ord_map.get(symbol, rnk_ord_by_value.get(getattr(symbol, 'Value', str(symbol)), None))
                if target_pct is not None and rnk_int is not None:
                    algo.Log(f"{date_str}, {symbol.Value}, Actual: {pct:.2f}%, Target: {target_pct:.2f}%, rnk={int(rnk_int)}")
                elif target_pct is not None:
                    algo.Log(f"{date_str}, {symbol.Value}, Actual: {pct:.2f}%, Target: {target_pct:.2f}%")
                elif rnk_int is not None:
                    algo.Log(f"{date_str}, {symbol.Value}, Actual: {pct:.2f}%, rnk={int(rnk_int)}")
                else:
                    algo.Log(f"{date_str}, {symbol.Value}, Actual: {pct:.2f}%")
            except Exception:
                continue

    # Sub-portfolio simple view
    if getattr(algo, 'sub_port_positions', False) and hasattr(algo, 'sub_portfolios'):
        for period, sub in getattr(algo, 'sub_portfolios', {}).items():
            tops = sub.get('topranked', []) or []
            bots = sub.get('bottomranked', []) or []
            held_top = [s for s in tops if s in getattr(algo, 'Portfolio', {}) and algo.Portfolio[s].Quantity != 0]
            held_bot = [s for s in bots if s in getattr(algo, 'Portfolio', {}) and algo.Portfolio[s].Quantity != 0]
            # Use sub-portfolio NAV as denominator when available
            try:
                denom = float(getattr(algo, 'sub_portfolio_nav', {}).get(period, 0.0))
            except Exception:
                denom = 0.0
            if denom <= 0:
                denom = total_value
            try:
                algo.Log(f"=== Sub-Portfolio {period} Positions ===")
            except Exception:
                pass
            sub_weights = getattr(algo, 'sub_portfolio_weights', {}).get(period, {}) if hasattr(algo, 'sub_portfolio_weights') else {}
            for s in held_top:
                if s in sub_weights:
                    pct = 100.0 * float(sub_weights.get(s, 0.0))
                else:
                    h = algo.Portfolio[s]
                    pct = 100.0 * (h.HoldingsValue / denom) if denom else 0.0
                algo.Log(f"{date_str}, {s.Value}, {pct:.2f}%")
            for s in held_bot:
                if s in sub_weights:
                    pct = -100.0 * abs(float(sub_weights.get(s, 0.0)))
                else:
                    h = algo.Portfolio[s]
                    pct = -100.0 * (abs(h.HoldingsValue) / denom) if denom else 0.0
                algo.Log(f"{date_str}, {s.Value}, {pct:.2f}%")


def update_sub_portfolio_navs(algo: Any) -> None:
    """Recompute sub-portfolio NAVs and drawdowns and plot them, plus overview NAV/DD.
    Extracted from main to keep main.py under size limits.
    """
    # Collect all symbols referenced by current weights
    all_syms = set()
    for w in getattr(algo, 'sub_portfolio_weights', {}).values():
        all_syms.update(w.keys())
    # Current closes
    cur_close = {}
    for sym in list(all_syms):
        try:
            if sym in algo.Securities and algo.Securities[sym].Price and algo.Securities[sym].Price > 0:
                cur_close[sym] = float(algo.Securities[sym].Price)
        except Exception:
            continue
    # Daily returns when possible
    sym_returns = {}
    if getattr(algo, '_prev_close', {}):
        for sym, px in cur_close.items():
            prev = getattr(algo, '_prev_close', {}).get(sym, None)
            if prev and prev > 0:
                sym_returns[sym] = (px - prev) / prev
    # Update each sub-portfolio
    plotted_count = 0
    series_limit_raw = int(getattr(algo, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
    series_limit = max(1, min(series_limit_raw, int(getattr(algo, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
    for period in sorted(getattr(algo, 'sub_portfolio_weights', {}).keys()):
        weights = getattr(algo, 'sub_portfolio_weights', {}).get(period, {})
        w_ret = 0.0
        for sym, w in weights.items():
            r = sym_returns.get(sym, None)
            if r is not None:
                w_ret += float(w) * float(r)
        algo.sub_portfolio_nav[period] = float(getattr(algo, 'sub_portfolio_nav', {}).get(period, 0.0)) * (1.0 + w_ret)
        nav_val = algo.sub_portfolio_nav[period]
        getattr(algo, 'sub_portfolio_nav_history', {}).setdefault(period, []).append(nav_val)
        if hasattr(algo, 'sub_portfolio_nav_timeline'):
            getattr(algo, 'sub_portfolio_nav_timeline', {}).setdefault(period, []).append(getattr(algo, 'Time', None))
        # Plot NAV and drawdown series
        try:
            hist = getattr(algo, 'sub_portfolio_nav_history', {}).get(period, [])
            if hist:
                peak = max(hist)
                dd_pct = ((peak - nav_val) / peak * 100.0) if peak > 0 else 0.0
                if getattr(algo, 'SUB_CHART_SINGLE_PANEL', False):
                    suffix = ""
                else:
                    chart_index = plotted_count // series_limit
                    suffix = "" if chart_index == 0 else f"-{chart_index+1}"
                if nav_val is not None and nav_val > 0:
                    log_nav = math.log(float(nav_val))
                    algo.Plot(f"SubNAV{suffix}", f"SP-{period}", log_nav)
                algo.Plot(f"SubDrawdown{suffix}", f"SP-{period}", -dd_pct)
                plotted_count += 1
        except Exception:
            pass
    # Overview totals
    try:
        total_nav = sum(getattr(algo, 'sub_portfolio_nav', {}).values()) if hasattr(algo, 'sub_portfolio_nav') else 0.0
        algo.overview_nav = float(total_nav)
        if hasattr(algo, 'overview_nav_history'):
            getattr(algo, 'overview_nav_history', []).append(algo.overview_nav)
        if hasattr(algo, 'overview_nav_timeline'):
            getattr(algo, 'overview_nav_timeline', []).append(getattr(algo, 'Time', None))
        if getattr(algo, 'overview_nav_history', None):
            peak_total = max(getattr(algo, 'overview_nav_history'))
            total_dd_pct = ((peak_total - algo.overview_nav) / peak_total * 100.0) if peak_total > 0 else 0.0
            if algo.overview_nav > 0:
                algo.Plot("Overview", "TotalNAV (log)", math.log(float(algo.overview_nav)))
            dd_frac_total = float(total_dd_pct) / 100.0 if total_dd_pct is not None else 0.0
            if dd_frac_total >= 0.0 and dd_frac_total < 1.0:
                log_dd_total = -math.log(1.0 - dd_frac_total) if dd_frac_total > 0 else 0.0
                algo.Plot("Overview", "TotalDrawdown (logDD)", -100.0 * log_dd_total)
    except Exception:
        pass
    if cur_close:
        algo._prev_close = dict(cur_close)
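
# Note on the drawdown transform above: plotting -log(1 - dd) keeps deep drawdowns
# distinguishable alongside a log-NAV panel; e.g. dd = 10% gives ~0.105 and
# dd = 50% gives ~0.693 before the *(-100) scaling in the Plot call.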


def print_monthly_metrics(algo: Any) -> None:
    """Log a month-to-date snapshot of NAV-based metrics for each sub-portfolio.
    Mirrors the in-class implementation but callable from helpers.
    """
    if not getattr(algo, 'sub_portfolio_accounting', True):
        return
    if not hasattr(algo, 'sub_portfolio_nav_history'):
        return
    now = getattr(algo, 'Time', None)
    if now is None:
        return
    y, m = now.year, now.month
    algo.Log("=== Monthly Sub-Portfolio Metrics (NAV-based) ===")
    csv_rows: List[List[str]] = []
    long_periods = getattr(algo, 'LONG_PERIODS', [])
    n = len(long_periods)
    initial_sub_value = getattr(algo, 'initialcash', 0.0) / n if n else 0.0
    for i, period in enumerate(long_periods, 1):
        navs = getattr(algo, 'sub_portfolio_nav_history', {}).get(period, [])
        times = getattr(algo, 'sub_portfolio_nav_timeline', {}).get(period, [])[0:len(navs)]
        if not navs:
            continue
        # Narrow to this month
        if times:
            idxs = [k for k, t in enumerate(times) if t and t.year == y and t.month == m]
        else:
            idxs = list(range(len(navs)))
        equity = navs[-1]
        if idxs:
            start_idx, end_idx = idxs[0], idxs[-1]
            start_nav, end_nav = navs[start_idx], navs[end_idx]
            mtd = ((end_nav - start_nav) / start_nav * 100.0) if start_nav != 0 else 0.0
            month_navs = navs[start_idx:end_idx + 1]
            rets = []
            for j in range(1, len(month_navs)):
                prev = month_navs[j-1]
                cur = month_navs[j]
                rets.append((cur - prev) / prev if prev != 0 else 0.0)
            maxdd_pct = _max_drawdown(month_navs) * 100.0 if len(month_navs) > 1 else 0.0
            sharpe = _sharpe_ratio(rets) if len(rets) > 1 else 0.0
        else:
            mtd = 0.0
            maxdd_pct = 0.0
            sharpe = 0.0
        algo.Log(f"[Monthly] Sub-Portfolio {i} (LONG_PERIOD={period}): Equity=${equity:.2f}, MTD={mtd:.2f}%, MaxDD(M)={maxdd_pct:.2f}%, Sharpe(M)={sharpe:.2f}")
        # enqueue CSV row similar to OnEndOfAlgorithm summary
        net_profit = (equity - initial_sub_value) if initial_sub_value > 0 else 0.0
        profit_pct = (net_profit / initial_sub_value * 100.0) if initial_sub_value > 0 else 0.0
        csv_rows.append([now.strftime('%Y-%m-%d'), i, period, f"{equity:.2f}", f"{net_profit:.2f}", f"{profit_pct:.2f}", f"{maxdd_pct:.2f}", f"{sharpe:.4f}"])

    _write_metrics_csv(algo, csv_rows)
    # After month-end prints, flag minute cleanup for next morning
    setattr(algo, '_pending_minute_cleanup', True)
    # Disable T-60 window at close
    setattr(algo, '_t60_window_active', False)
"""Configuration constants for symbol universes and external series.

These are shared across algorithms to avoid duplication and typos.
In QC Web IDE, import as:

    from helpers import EQUITY_LIST, BOND_LIST

"""

# Core equity universe
EQUITY_LIST = [
    'DIA', 'IGV', 'IWD', 'IYM', 'SCHH', 'VT', 'VTWO', 'XLP', 'VDE', 'SPDW',
    'IWF', 'IJJ', 'IWM', 'IYT', 'VTV', 'XLU', 'EFA', 'IWB', 'IJR', 'IJH',
    'IWN', 'IYW', 'VB', 'VUG', 'XLV', 'EFG', 'IWO', 'IJK', 'KBE', 'VBK',
    'XLY', 'XME', 'EFV', 'ITB', 'REM', 'IWP', 'KRE', 'VBR', 'XLB', 'XHB',
    'EZU', 'IVE', 'IWS', 'VEA', 'XOP', 'IBB', 'SPMD', 'ACWX', 'IYE', 'OEF',
    'VGK', 'XLE', 'XRT', 'IEV', 'IVW', 'IYF', 'QQQ', 'VGT', 'XLF', 'IWR',
    'IYZ', 'IYH', 'RSP', 'VHT', 'XLI', 'VNQ', 'VYM', 'XLK', 'IXC', 'SOXX',
    'IYR', 'SCHD', 'SCHG', 'SCHV', 'VEU', 'SMH', 'OIH', 'SPSM', 'SPYG', 'SPYV'
]

# Core bond/alt universe
BOND_LIST = [
    'BND', 'BNDX', 'MBB', 'HYG', 'JNK', 'SJNK', 'VCIT',
    'IEI', 'IEF', 'SHY', 'EMLC', 'PCY', 'BKLN', 'CWB',
    'TLT', 'PFF', 'EMB', 'LQD', 'VCLT', 'VCSH', 'BIV',
    'FTSL', 'MORT', 'PSK', 'PGF', 'PFXF', 'ZROZ', 'AGG',
    'TLH', 'GLD', 'SPTL', 'USCI', 'BLV', 'VGLT', 'SRLN',
    'VNQ', 'USO', 'PGX', 'IGIB', 'REM', 'EDV', 'VMBS'
]

# FRED series defaults (used with helpers.fred.Fred)
FRED_DEFAULT_SERIES = "FRED/EFFR"  # Effective Federal Funds Rate


# Global toggles (defaults) for logging/metrics
# Controls whether per–sub-portfolio metrics (CAGR, MaxDD, Alpha/Beta) are logged at end of algorithm
# This value is used as the default in configure_logging_flags and can be overridden by profile or overrides
SUB_PORT_METRICS_DEFAULT = True

# Single, explicit log overrides (edit here to force logging behavior across profiles).
# These are applied after the selected profile so they always win.
# Set True/False as desired; leave keys absent to keep profile defaults.
LOG_OVERRIDES = {
    # Core prints
    'sub_port_metrics': False,            # disables "=== Sub-Portfolio X Metrics ===" lines
    'sub_port_audit': False,              # disables "=== Sub-Portfolio X Audit ===" lines
    'sub_portfolio_accounting': False,    # gates monthly NAV snapshot at T-0
    'POSITION_REPORTING_FLAG': False,
    'abbreviated_positions_print': False,
    'sub_port_positions': False,
    'PRINT_CLOSED_POSITIONS': False,
    'PRINT_AGGREGATION_DIAGNOSTICS': False,
    'PRINT_CASH_BOUNDARY': False,
    'PRINT_REGIME_SUMMARY': False,
    'PRINT_REGIME_STATUS': False,
    'WRITE_ANALYTICS_CSV': False,
    'PRINT_T60_COMPARE_ONCE': False,
    # End-of-run Downside Capture explicit toggle (independent of sub_port_metrics)
    'PRINT_DOWNSIDE_CAPTURE_SUMMARY': True,
    # Funding summary (EndOfAlgorithm). Set True to always print, False to suppress.
    # You can change this flag here to control all runs without touching main.py
    'PRINT_FUNDING_SUMMARY': True,
    # Misc toggles frequently used in audit mode
    'audit_leverage': False,
    # Ranking comment flag: 1 prints a couple of lines; set to 0 to suppress
    'flagCommentRank': 1,
}
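
# Usage sketch (hypothetical Initialize body): the selected profile is applied
# first and LOG_OVERRIDES is applied after it, so any key present here wins.
#
#     from helpers import LOG_OVERRIDES, configure_logging_flags
#     configure_logging_flags(self, overrides=LOG_OVERRIDES, profile="audit")
#     # sub_port_metrics ends up False even though the audit profile sets it True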


# --- Utilities ---
from datetime import datetime

try:
    from AlgorithmImports import Resolution  # type: ignore
except Exception:  # local lint fallback
    class Resolution:  # type: ignore
        Daily = 0
        Minute = 1


def add_equity_list(algo, tickers, use_minute=False):
    """Add a list of equities at Daily resolution; optionally add Minute.

    Returns a list of Symbol objects from AddEquity.
    """
    symbols = []
    if not tickers:
        return symbols
    for ticker in tickers:
        try:
            sec = algo.AddEquity(ticker, Resolution.Daily)
            symbols.append(sec.Symbol)
            if use_minute:
                # Add lightweight minute subscription as well
                algo.AddEquity(ticker, Resolution.Minute)
        except Exception as e:
            try:
                algo.Debug(f"[add_equity_list] Failed to add {ticker}: {e}")
            except Exception:
                pass
    return symbols


def add_core_instruments(algo, use_minute=False):
    """Adds the baseline instruments used across strategies and assigns them on the algo.

    Mirrors the original behavior:
    - Most attributes are assigned to the Symbol (e.g., self.tlt = Symbol)
    - self.jpm is assigned to the Security object (not .Symbol), since code later accesses self.jpm.Symbol

    Returns a dict mapping attribute name -> assigned value (Symbol or Security).
    """
    core = {
        'tlt': ('TLT', True),
        'bond': ('BOND', True),
        'ief': ('IEF', True),
        'gld': ('GLD', True),
        'sptl': ('SPTL', True),
        'tlh': ('TLH', True),
        'edv': ('EDV', True),
        'spy': ('SPY', True),
        'uup': ('UUP', True),
        'iwb': ('IWB', True),
        'bsv': ('BSV', True),
        'vgsh': ('VGSH', True),
        'spti': ('SPTI', True),
        'schr': ('SCHR', True),
        'ftls': ('FTLS', True),
        'iwv': ('IWV', True),
        'jpm': ('JPM', False),  # keep Security (not Symbol), used for Exchange access
        'kbe': ('KBE', True),
        'csm': ('CSM', True),
    }

    assigned = {}
    for attr, (ticker, as_symbol) in core.items():
        try:
            sec = algo.AddEquity(ticker, Resolution.Daily)
            if use_minute:
                algo.AddEquity(ticker, Resolution.Minute)
            value = sec.Symbol if as_symbol else sec
            setattr(algo, attr, value)
            assigned[attr] = value
        except Exception as e:
            try:
                algo.Debug(f"[add_core_instruments] Failed to add {ticker}: {e}")
            except Exception:
                pass
    return assigned


def configure_universe(algo, universe_type="EQUITY", use_minute=False, overrides=None):
    """Configure the trading universe and related settings in one call.

    - Adds core instruments (SPY, JPM, TLT, etc.)
    - Adds target universe (equity or bonds) using add_equity_list
    - Sets INDUSTRY, CASH, CASH_FILTER_TYPES, HEDGE_VALUE
    - Sets benchmark per universe and standard multipliers/nuanced cash
    - Can be customized with 'overrides' dict to tweak defaults

    Returns a summary dict of key fields for logging/tests.
    """
    overrides = overrides or {}

    # Core instruments first
    add_core_instruments(algo, use_minute)

    # Defaults shared by universes
    algo.FIXED_UNIVERSE_TYPE = universe_type

    # Helper for safe override fetch
    def get(name, default):
        return overrides.get(name, default)

    if universe_type == "EQUITY":
        algo.EQUITY_LIST = list(get('EQUITY_LIST', EQUITY_LIST))
        add_equity_list(algo, algo.EQUITY_LIST, use_minute)
        algo.INDUSTRY = [algo.Symbol(t) for t in algo.EQUITY_LIST]
        # Mirror monolith: SECTOR defaults to INDUSTRY list
        algo.SECTOR = list(algo.INDUSTRY)
        algo.CASH = [algo.tlt, algo.ief, algo.gld, algo.sptl, algo.tlh]
        # Regime and cash filter sids
        algo.REGIME_EQ = get('REGIME_EQ', algo.spy)
        algo.REGIME_FI = get('REGIME_FI', algo.edv)
        algo.CASH_FILTERING_SID = get('CASH_FILTERING_SID', algo.uup)
        algo.CASH_FILTER_TYPES = get('CASH_FILTER_TYPES', ["sector"])  # equity default
        algo.HEDGE_VALUE = get('HEDGE_VALUE', 'EQUITY')
        algo.LONG_MOMENTUM_MULTIPLIER = get('LONG_MOMENTUM_MULTIPLIER', 0.00)
        algo.SHORT_MOMENTUM_MULTIPLIER = get('SHORT_MOMENTUM_MULTIPLIER', 0.00)
        algo.VOL_MULTIPLIER = get('VOL_MULTIPLIER', 1.00)
        algo.NUANCED_CASH = get('NUANCED_CASH', 0.00)
        # Long/Short default for EQUITY: long-only by default ('off').
        # The strategy's long exposure default remains 1.30x via main.py (self.long_leverage = 1.30).
        try:
            algo.LongShortMode = 'off'
        except Exception:
            pass
        # Benchmark: default to a broad equity benchmark
        algo.SetBenchmark(algo.iwv)
        algo.BENCHMARK_SYMBOL = getattr(algo, 'iwv', None)

    elif universe_type == "BONDS":
        algo.BOND_LIST = list(get('BOND_LIST', BOND_LIST))
        add_equity_list(algo, algo.BOND_LIST, use_minute)
        algo.INDUSTRY = [algo.Symbol(t) for t in algo.BOND_LIST]
        # Mirror monolith: SECTOR defaults to INDUSTRY list
        algo.SECTOR = list(algo.INDUSTRY)
        algo.CASH = [algo.bsv, algo.vgsh, algo.spti, algo.schr]
        # Regime and cash filter sids
        algo.REGIME_EQ = get('REGIME_EQ', algo.spy)
        algo.REGIME_FI = get('REGIME_FI', algo.edv)
        algo.CASH_FILTERING_SID = get('CASH_FILTERING_SID', algo.uup)
        algo.CASH_FILTER_TYPES = get('CASH_FILTER_TYPES', ["sector", "industry"])  # bonds default
        algo.HEDGE_VALUE = get('HEDGE_VALUE', 'EQUITY')
        algo.LONG_MOMENTUM_MULTIPLIER = get('LONG_MOMENTUM_MULTIPLIER', 1.00)
        algo.SHORT_MOMENTUM_MULTIPLIER = get('SHORT_MOMENTUM_MULTIPLIER', 0.00)
        algo.VOL_MULTIPLIER = get('VOL_MULTIPLIER', 0.00)
        algo.NUANCED_CASH = get('NUANCED_CASH', 40.00)
        # Long/Short defaults: bonds-friendly = long-only unless QC Parameter overrides
        try:
            algo.LongShortMode = 'off'
        except Exception:
            pass
        # Bonds benchmark: prefer AGG; ensure it's added then set as benchmark
        try:
            agg_sec = algo.AddEquity('AGG', Resolution.Daily)
            algo.agg = agg_sec.Symbol
            algo.SetBenchmark(algo.agg)
            algo.BENCHMARK_SYMBOL = getattr(algo, 'agg', None)
        except Exception:
            # Fallback to legacy 'BOND' if AGG cannot be added
            try:
                algo.SetBenchmark(algo.bond)
                algo.BENCHMARK_SYMBOL = getattr(algo, 'bond', None)
            except Exception:
                pass
    else:
        # Unknown universe type; no-op but return summary
        return {
            'universe_type': universe_type,
            'industry_len': 0,
            'cash_len': 0,
            'cash_filter_types': [],
            'hedge_value': None,
        }

    summary = {
        'universe_type': universe_type,
        'industry_len': len(getattr(algo, 'INDUSTRY', [])),
        'cash_len': len(getattr(algo, 'CASH', [])),
        'cash_filter_types': getattr(algo, 'CASH_FILTER_TYPES', []),
        'hedge_value': getattr(algo, 'HEDGE_VALUE', None),
    }
    try:
        if hasattr(algo, 'audit_leverage') and algo.audit_leverage:
            algo.Log(f"[configure_universe] {summary}")
    except Exception:
        pass
    return summary
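
# Usage sketch (hypothetical Initialize body and override values):
#
#     from helpers import configure_universe
#     summary = configure_universe(self, universe_type="BONDS", use_minute=True,
#                                  overrides={'NUANCED_CASH': 20.0})
#     self.Debug(str(summary))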


def configure_logging_flags(algo, overrides=None, profile="summary"):
    """Set all logging/reporting flags in one place with profiles.

    Parameters:
      - overrides: dict of flag->value to override profile defaults
      - profile: one of {"summary", "detailed", "audit"}

    Returns a small dict of key flags for confirmation.
    """
    if overrides is None:
        overrides = {}

    # Base defaults (summary-like, safe and quiet)
    flags = {
        'summary_logs_or_detailed': 'summary',
        'summarize_long_short_profit': False,
        'abbreviated_positions_print': False,
        'POSITION_REPORTING_FLAG': False,
        'PRINT_REGIME_STATUS': False,
        'PRINT_REGIME_SUMMARY': False,
        'PRINT_CLOSED_POSITIONS': False,
        'PRINT_AGGREGATION_DIAGNOSTICS': False,
        'PRINT_CASH_BOUNDARY': False,
        'sub_port_positions': False,
        'sub_port_audit': False,
        'sub_port_metrics': SUB_PORT_METRICS_DEFAULT,
        'sub_portfolio_accounting': False,
        'audit_leverage': False,
        'Print_Borrow_Cost_Summary': False,
        # End-of-run funding summary toggle
        'PRINT_FUNDING_SUMMARY': True,
        'WRITE_ANALYTICS_CSV': False,
        'PRINT_T60_COMPARE_ONCE': False,
        'flagCommentRank': 1,  # 1, 2, or 'None'
        # Regime control: always period-based using SMA_PERIOD
        # Long/Short defaults (discoverable; QC parameters take precedence)
        # Long/Short behavior default. Override via QC Parameter LongShortMode, in Main (self.LongShortMode), or here.
        # Accepted values (case-insensitive):
        #   - "off", "none", "long-only", "longonly"       → long-only (no shorts)
        #   - "130-30", "130/30", "ls", "longshort", "l/s" → standard 130/30
        #   - "100-30", "100/30"                           → standard 100/30
        #   - Custom pattern like "150-50" or "150/50"     → long=1.50x, short mass=50%
        'LongShortMode': 'off',  # Global default: long-only (bond-friendly). QC Parameter overrides this.
        # Optional inputs (effective when LongShortMode enables shorts or for custom patterns):
        #   LongLeverage: float, e.g., 1.30 for 130% gross long exposure (primarily for EQUITY)
        #   BondLeverage (QC Parameter only): float, overrides long exposure for UNIVERSE=BONDS; default is 1.00 (no leverage)
        #   ShortMassPercent: float, e.g., 30.0 for 30% short mass in risk-on (maps to HEDGE_MULTIPLIER 0.30)
        #   ShortCount: int or None, number of shorts to take from bottom-ranked set in risk-on (defaults to TOPN//2)
        'LongLeverage': 1.30,     # used when LongShortMode in {'off','custom'} or as base
        'ShortMassPercent': 30.0, # used for 'custom' (maps to HEDGE_MULTIPLIER 0.30)
        'ShortsInRiskOff': False, # reserved for future; current logic disables shorts in Risk-off
        # Optional explicit short count (defaults to floor(INDUSTRY_TOPN/2) in Risk-on)
        'ShortCount': None,
        # Frequency guard: if enabled, after aggregating sub-portfolio weights, drop any non-cash
        # symbol whose sub-frequency (count of subs with positive weight) is lower than the highest
        # cash-complex frequency (max over CASH + CASH_FILTERING_SID). This enforces that we are not
        # long non-cash names with less supporting frequency than our cash complex.
        'APPLY_CASH_FREQUENCY_GUARD': False,
        # Unified TopN selection: if enabled, the final TopN cap is applied across cash and non-cash
        # together by aggregated weight, instead of prioritizing non-cash first. Use together with
        # APPLY_CASH_FREQUENCY_GUARD to align with “treat cash and non-cash the same” selection intent.
        'APPLY_UNIFIED_TOPN_SELECTION': False,
        # Portfolio-level cash truncation toggle: when True, if the cash filter SID appears inside the
        # aggregated TopN window (by positive weight order), truncate non-cash beyond that boundary and
        # let remaining mass flow to the cash complex (cash proxies + cash filter SID).
        'APPLY_CASH_TRUNCATION_AT_PORTFOLIO': False,
        # Ranking diagnostics (read by helpers.ranking)
        'DIAG_BOUNDARY_META': False,
        'DIAG_RANKING_BOUNDARY': False,
        'RANKING_BOUNDARY_K': 3,
        'DIAG_SERIES_DUMP': False,
        # Optional symbol filter for series dump; when None, dumps all (or use DIAG_SERIES_DUMP=SPY via parameter parsing)
        'DIAG_SERIES_DUMP_SYMBOL': None,
        # Charting flags
        # Consolidate all sub-portfolio series into a single panel when True
        'SUB_CHART_SINGLE_PANEL': False,
        # Per-panel series limits
        # Keep small so additional windows (-2, -3, ...) render reliably.
        # With 21 sub-portfolios, use 7 per chart to produce exactly 3 charts.
        'SUB_CHART_SERIES_LIMIT': 7,
        # Optional separate DD controls (fallback to SUB_CHART_* when None/0)
        'SUB_DD_SINGLE_PANEL': False,
        'SUB_DD_SERIES_LIMIT': 7,
    }

    # Profile presets
    profiles = {
        'summary': {},
        'detailed': {
            'summary_logs_or_detailed': 'detailed',
            'POSITION_REPORTING_FLAG': True,
            'PRINT_CLOSED_POSITIONS': True,
            'summarize_long_short_profit': True,
            'print_position_pnl': True,
            'PRINT_REGIME_SUMMARY': True,
        },
        'audit': {
            'summary_logs_or_detailed': 'detailed',
            'POSITION_REPORTING_FLAG': True,
            'PRINT_CLOSED_POSITIONS': True,
            'summarize_long_short_profit': True,
            'print_position_pnl': True,
            'PRINT_REGIME_SUMMARY': True,
            'PRINT_REGIME_STATUS': True,
            'audit_leverage': True,
            'audit_sub_port_history': True,
            'sub_port_positions': True,
            'sub_port_audit': True,
            'sub_port_metrics': True,
            'audit_sub_port_periods': True,
            'multiplier_log': True,
            'alpha_mo': True,
            'lhist_check': True,
            'margin_summary': True,
            'Print_Borrow_Cost_Summary': True,
        },
    }

    # Apply profile then overrides
    flags.update(profiles.get(profile or 'summary', {}))
    if isinstance(overrides, dict):
        flags.update(overrides)

    # Set attributes on algo
    for k, v in flags.items():
        setattr(algo, k, v)

    return {
        'profile': profile,
        'summary_logs_or_detailed': flags['summary_logs_or_detailed'],
        'POSITION_REPORTING_FLAG': flags['POSITION_REPORTING_FLAG'],
        'PRINT_CLOSED_POSITIONS': flags['PRINT_CLOSED_POSITIONS'],
        'PRINT_T60_COMPARE_ONCE': flags['PRINT_T60_COMPARE_ONCE'],
    }
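
# The LongShortMode comments above describe patterns like "130-30" or "150/50".
# A minimal parser sketch under those rules (hypothetical; the real handling
# lives in main.py / QC parameter parsing), mapping a mode string to
# (long_leverage, short_mass_fraction):
def _parse_long_short_mode_sketch(mode):
    m = (str(mode) if mode is not None else 'off').strip().lower()
    if m in ('off', 'none', 'long-only', 'longonly'):
        return 1.0, 0.0  # long-only; actual long exposure comes from LongLeverage
    if m in ('ls', 'longshort', 'l/s'):
        return 1.30, 0.30  # aliases for the standard 130/30
    for sep in ('-', '/'):
        if sep in m:
            left, _, right = m.partition(sep)
            if left.isdigit() and right.isdigit():
                return int(left) / 100.0, int(right) / 100.0  # "150-50" -> (1.5, 0.5)
    return 1.0, 0.0  # unrecognized: fall back to long-only
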
def calculate_commission(fill_quantity):
    """Calculate commission: $0.005 per share, $1 minimum per order."""
    return max(0.005 * abs(fill_quantity), 1.0)
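# Examples: calculate_commission(100) returns 1.0 (the $1 minimum applies, since
# 0.005 * 100 = $0.50); calculate_commission(500) returns 2.5.
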
# Funding and margin cost helpers
from typing import Any

try:
    from AlgorithmImports import *  # type: ignore
except Exception:
    pass


def apply_margin_and_borrow_costs(algo: Any) -> None:
    """Apply daily funding costs to portfolio cash.
    - Short rebate: EFFR - SHORT_BORROW_SPREAD_BPS
    - Stock borrow fee: STOCK_BORROW_FEE_BPS
    - Long margin interest: EFFR + LONG_MARGIN_SPREAD_BPS on leveraged longs
    - Short-side margin interest is currently disabled per policy (zero line item)
    Accumulates totals on the algo for reporting.
    """
    try:
        fedfunds = float(getattr(algo, "fedfunds_rate", 0.02))
        long_spread_bps = float(getattr(algo, "LONG_MARGIN_SPREAD_BPS", 50))
        short_borrow_spread_bps = float(getattr(algo, "SHORT_BORROW_SPREAD_BPS", 50))
        stock_borrow_fee_bps = float(getattr(algo, "STOCK_BORROW_FEE_BPS", 50))

        annual_short_rebate_rate = fedfunds - short_borrow_spread_bps / 10000.0
        annual_stock_borrow_fee = stock_borrow_fee_bps / 10000.0
        annual_long_margin_rate = fedfunds + long_spread_bps / 10000.0

        daily_long_margin_rate = annual_long_margin_rate / 252.0
        daily_short_rebate_rate = annual_short_rebate_rate / 252.0
        daily_stock_borrow_fee = annual_stock_borrow_fee / 252.0

        total_short_rebate = 0.0
        total_stock_borrow_fee = 0.0
        commission_total = 0.0
        # Sum commissions for all filled orders for the current day
        try:
            if hasattr(algo, 'Transactions') and hasattr(algo, 'Time'):
                orders = algo.Transactions.GetOrders(lambda o: o.Status == 2 and hasattr(o, 'Time') and o.Time.date() == algo.Time.date())
                commission_total = sum(getattr(order, 'Commission', 0.0) for order in orders)
        except Exception:
            pass
        # Iterate current portfolio for shorts
        portfolio_items = []
        try:
            portfolio_items = [(kvp.Key, kvp.Value) for kvp in algo.Portfolio]
        except Exception:
            try:
                portfolio_items = list(getattr(algo, 'Portfolio', {}).items())
            except Exception:
                portfolio_items = []
        for symbol, holding in portfolio_items:
            try:
                if holding.Quantity < 0 and algo.Securities[symbol].Price > 0:
                    short_value = abs(holding.Quantity) * algo.Securities[symbol].Price
                    rebate = short_value * daily_short_rebate_rate
                    borrow_fee = short_value * daily_stock_borrow_fee
                    total_short_rebate += rebate
                    total_stock_borrow_fee += borrow_fee
            except Exception:
                continue
        net_short_funding = total_short_rebate - total_stock_borrow_fee
        if net_short_funding != 0:
            try:
                algo.Portfolio.CashBook[algo.AccountCurrency].AddAmount(net_short_funding)
            except Exception:
                pass

        # Gross short market value, used below to net short proceeds against the
        # long interest base (the day's commissions were already summed above)
        try:
            gross_short_value = sum(
                abs(h.HoldingsValue) for s, h in portfolio_items
                if h.Quantity < 0 and algo.Securities[s].Price > 0
            )
        except Exception:
            gross_short_value = 0.0

        # Long margin interest: policy-controlled base
        try:
            gross_long_value = sum(
                h.HoldingsValue for s, h in portfolio_items
                if h.Quantity > 0 and algo.Securities[s].Price > 0
            )
        except Exception:
            gross_long_value = 0.0
        base_equity = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0))
        # Net short proceeds against long debit when computing long interest (always-on policy)
        # Professional convention: in L/S portfolios, shorts finance part of the longs.
        long_interest_base = max(0.0, gross_long_value - base_equity - gross_short_value)
        long_margin_cost = 0.0
        if long_interest_base > 0.0:
            long_margin_cost = long_interest_base * daily_long_margin_rate
            try:
                algo.Portfolio.CashBook[algo.AccountCurrency].AddAmount(-long_margin_cost)
            except Exception:
                pass
            if not hasattr(algo, 'cumulative_long_margin_cost'):
                algo.cumulative_long_margin_cost = 0.0
            algo.cumulative_long_margin_cost += long_margin_cost

        # Print daily log of cost deductions if enabled
        try:
            log_costs = False
            # Allow QC IDE parameter to control logging
            param = getattr(algo, 'GetParameter', lambda k: None)("LOG_DAILY_COSTS")
            if param is not None and str(param).strip().lower() in ("1", "true", "yes", "y", "on"):
                log_costs = True
            if hasattr(algo, 'LOG_DAILY_COSTS'):
                log_costs = bool(getattr(algo, 'LOG_DAILY_COSTS')) or log_costs
            # Only print after StartDate (not during warmup)
            if log_costs and hasattr(algo, 'Time') and hasattr(algo, 'StartDate'):
                if algo.Time >= algo.StartDate:
                    total_portfolio_value = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0))
                    algo.Log(
                        f"[COSTS] Commission: {commission_total:.4f}, LongMargin: {long_margin_cost:.4f}, "
                        f"ShortRebate: {total_short_rebate:.4f}, StockBorrowFee: {total_stock_borrow_fee:.4f}, "
                        f"NetShortFunding: {net_short_funding:.4f}, LongInterestBase: {long_interest_base:.4f}, "
                        f"Rates: FedFunds={fedfunds:.4f}, LongSpreadBps={long_spread_bps}, ShortSpreadBps={short_borrow_spread_bps}, StockBorrowFeeBps={stock_borrow_fee_bps}, "
                        f"PortfolioValue: {total_portfolio_value:.2f}"
                    )
        except Exception:
            pass
        # Suppress daily runtime stats; summary will be emitted at EndOfAlgorithm

        # Extra short-side margin interest removed per updated policy
        short_margin_interest_cost = 0.0

        # Track cumulative short side items
        if not hasattr(algo, 'cumulative_short_rebate'):
            algo.cumulative_short_rebate = 0.0
        if not hasattr(algo, 'cumulative_stock_borrow_fee'):
            algo.cumulative_stock_borrow_fee = 0.0
        algo.cumulative_short_rebate += total_short_rebate
        algo.cumulative_stock_borrow_fee += total_stock_borrow_fee

        if getattr(algo, 'Print_Borrow_Cost_Summary', False):
            total_cost = (
                float(getattr(algo, 'cumulative_stock_borrow_fee', 0.0)) +
                float(getattr(algo, 'cumulative_long_margin_cost', 0.0)) +
                float(getattr(algo, 'cumulative_short_margin_interest_cost', 0.0))
            )
            algo.Log(
                f"Total Daily Margin Borrow Cost: "
                f"{total_stock_borrow_fee + long_margin_cost + short_margin_interest_cost:.2f} | "
                f"Cumulative: {total_cost:.2f}"
            )
    except Exception:
        pass


def get_short_collateral(algo: Any) -> float:
    collateral = 0.0
    percent = float(getattr(algo, "SHORT_CASH_PERCENTAGE", 1.0))
    try:
        for symbol, holding in getattr(algo, 'Portfolio', {}).items():
            if holding.Quantity < 0 and symbol in getattr(algo, 'Securities', {}):
                price = float(algo.Securities[symbol].Price)
                if price > 0:
                    collateral += abs(holding.Quantity) * price * percent
    except Exception:
        pass
    return float(collateral)


def get_adjusted_portfolio_value(algo: Any) -> float:
    gross_value = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0)) * (100.0 - float(getattr(algo, 'CASH_PERCENT', 0.0))) / 100.0
    short_collateral = get_short_collateral(algo)
    return float(gross_value - short_collateral)
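

# Minimal worked example (hypothetical values) of how the two helpers compose:
# with TotalPortfolioValue=100_000, CASH_PERCENT=5 and SHORT_CASH_PERCENTAGE=1.0,
# a single short of 100 shares at $50 reserves 100 * 50 * 1.0 = 5_000 of
# collateral, so get_adjusted_portfolio_value returns
# 100_000 * 0.95 - 5_000 = 90_000 available for sizing new targets.
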
# Data source helpers: minute gating, prewarm, cleanup
from typing import Any, Iterable, Set
import math

try:
    from AlgorithmImports import *  # type: ignore
except Exception:
    SubscriptionDataConfig = None  # type: ignore
    Resolution = None  # type: ignore
from .ranking import update_ranking_snapshot


def prewarm_minute_data(algo: Any) -> None:
    # Template: set T-60 window active, compute ranking, ensure minute subs
    # Mark the T-60 window active (the attribute is created on first call)
    algo._t60_window_active = True
    # One-time informational log at first activation
    try:
        if getattr(algo, 'PRINT_T60_COMPARE_ONCE', False) and not getattr(algo, '_printed_t60_once', False):
            algo.Log(
                f"[T-60] Window activated at {getattr(algo, 'Time', None)}. "
                f"Minute subs will be enabled for the trade set; regime computed and held until close."
            )
            setattr(algo, '_printed_t60_once', True)
    except Exception:
        pass
    # Compute and store regime at T-60 so downstream flows can use it
    try:
        if hasattr(algo, 'calculate_regime'):
            reg_tuple = algo.calculate_regime()
            if isinstance(reg_tuple, tuple) and len(reg_tuple) == 5:
                regime, ratio, ratioSMA, regime_eq, regime_fi = reg_tuple
                setattr(algo, 'regime', regime)
                setattr(algo, 'regime_ratio', ratio)
                setattr(algo, 'regime_ratio_sma', ratioSMA)
                setattr(algo, 'regime_eq', regime_eq)
                setattr(algo, 'regime_fi', regime_fi)
                # Simple status line when enabled
                if getattr(algo, 'PRINT_REGIME_STATUS', False):
                    try:
                        algo.Log(f"Regime Status (T-60): {regime}")
                    except Exception:
                        pass
                # Optional summary print when enabled
                if getattr(algo, 'PRINT_REGIME_SUMMARY', False):
                    try:
                        def _fmt(x, nd=4):
                            try:
                                return f"{float(x):.{nd}f}" if x is not None and not math.isnan(float(x)) else "nan"
                            except Exception:
                                return str(x)
                        fed = getattr(algo, 'fedfunds_rate', None)
                        daily_long_margin_rate = None
                        if fed is not None:
                            daily_long_margin_rate = (float(fed) + float(getattr(algo, 'LONG_MARGIN_SPREAD_BPS', 50)) / 10000.0) / 252.0
                        algo.Log(
                            "Regime Summary (T-60): "
                            f"regime={regime}, ratio={_fmt(ratio)}, sma={_fmt(ratioSMA)}, "
                            f"eq={_fmt(regime_eq, nd=2)}, fi={_fmt(regime_fi, nd=2)}"
                            + (f", fedfunds={fed:.4f}, daily_long_margin_rate={daily_long_margin_rate:.6f}" if fed is not None and daily_long_margin_rate is not None else "")
                        )
                    except Exception:
                        pass
    except Exception:
        pass
    # Populate ranking snapshot so sub_portfolios contain topranked/bottomranked for rebalance
    try:
        update_ranking_snapshot(algo)
    except Exception:
        pass
    # Ensure minute subscriptions for symbols you intend to trade
    # _ensure_minute_for_trade_set(algo, symbols)
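

def schedule_prewarm_example(algo: Any) -> None:
    """Sketch (not wired up anywhere): schedule prewarm_minute_data at T-60.

    Assumes `algo.spy` holds the benchmark Security used elsewhere in these
    helpers as the market-hours reference; swap in your own symbol as needed.
    """
    try:
        sym = getattr(getattr(algo, 'spy', None), 'Symbol', None)
        if sym is not None:
            algo.Schedule.On(
                algo.DateRules.EveryDay(sym),
                algo.TimeRules.BeforeMarketClose(sym, 60),  # 60 minutes before close
                lambda: prewarm_minute_data(algo)
            )
    except Exception:
        pass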


def _ensure_minute_for_trade_set(algo: Any, symbols: Iterable) -> None:
    """Upgrade the trade set to minute resolution while the T-60 window is active.

    Sketch: re-adding an equity at a finer resolution is the simplest supported
    way to upgrade its subscription in Lean; adapt if you trade other asset types.
    """
    if not getattr(algo, '_t60_window_active', False):
        return
    for s in symbols:
        try:
            if Resolution is not None:
                ticker = getattr(s, 'Value', str(s))
                algo.AddEquity(ticker, Resolution.Minute)
        except Exception:
            pass


def _maybe_cleanup_minute_subs(algo: Any) -> None:
    if getattr(algo, '_pending_minute_cleanup', False):
        # Placeholder: clean up tracking structures
        algo._pending_minute_cleanup = False
import pandas as pd


def save_eom_to_object_store(algo, filename="tools/eom_performance.csv"):
    rows = getattr(algo, "_eom_perf_rows", [])
    if not rows:
        algo.Log("No EOM performance rows to save.")
        return
    df = pd.DataFrame(rows)
    # ObjectStore.Save expects a string payload (SaveBytes is the raw-bytes
    # variant), so serialize to a CSV string instead of passing BytesIO contents
    csv_str = df.to_csv(index=False)
    algo.ObjectStore.Save(filename, csv_str)
    algo.Log(f"EOM performance data saved to Object Store as {filename}")
try:
    from AlgorithmImports import *  # type: ignore
except Exception:
    # Local dev fallbacks to quiet linters; QC provides these at runtime
    from datetime import datetime  # type: ignore
    class PythonData(object):
        pass
    class SubscriptionDataSource:
        def __init__(self, *args, **kwargs):
            pass
    class SubscriptionTransportMedium:
        RemoteFile = 0
else:
    # When running on QC, AlgorithmImports includes datetime
    from datetime import datetime


class Fred(PythonData):
    def GetSource(self, config, date, isLiveMode):
        return SubscriptionDataSource(
            f"https://fred.stlouisfed.org/graph/fredgraph.csv?id={config.Symbol.Value.split('/')[-1]}",
            SubscriptionTransportMedium.RemoteFile
        )

    def Reader(self, config, line, date, isLiveMode):
        if not line or line.startswith("DATE") or line.startswith(" "):
            return None
        try:
            split = line.split(',')
            date_str = split[0].strip()
            if len(date_str) != 10 or date_str.count('-') != 2:
                return None
            data = Fred()
            data.Symbol = config.Symbol
            data.Time = datetime.strptime(date_str, "%Y-%m-%d")
            valstr = split[1].strip()
            if valstr in ('.', '', 'NA', 'nan', 'NaN'):
                return None
            value = float(valstr)
            data.Value = value
            return data
        except Exception:
            return None
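
# Usage sketch: subscribe to a FRED series in Initialize. GetSource keeps only the
# last '/'-separated token of the ticker, so "FEDFUNDS" and "FRED/FEDFUNDS" hit the
# same CSV endpoint:
#
#     self.fedfunds_symbol = self.AddData(Fred, "FRED/FEDFUNDS", Resolution.Daily).Symbol
#
# Values then arrive in OnData under that symbol, with data.Value as the series level.
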
def print_final_metrics(self):
    debug = getattr(self, 'METRICS_DEBUG_LOGS', False)
    if debug:
        self.Log("[metrics debug] ENTERED print_final_metrics (ungated)")
    try:
        # Gate all logs: only print once self.Time has reached self.StartDate
        if not hasattr(self, 'Time') or not hasattr(self, 'StartDate') or self.Time < self.StartDate:
            return
        import numpy as np
        total_values = np.array(getattr(self, 'daily_total_values', []), dtype=np.float64)
        portfolio_returns = np.array(getattr(self, 'daily_portfolio_returns', []), dtype=np.float64)
        benchmark_returns = np.array(getattr(self, 'daily_benchmark_returns', []), dtype=np.float64)
        nav_timeline = np.array(getattr(self, 'overview_nav_timeline', []))
        # Detailed summary log
        if debug:
            self.Log(f"[metrics debug] print_final_metrics called at {getattr(self, 'Time', 'NA')} (StartDate={getattr(self, 'StartDate', 'NA')})")
            self.Log(f"[metrics debug] total_values len={len(total_values)} sample={total_values[:5]} last={total_values[-5:] if len(total_values) > 5 else total_values}")
            self.Log(f"[metrics debug] portfolio_returns len={len(portfolio_returns)} sample={portfolio_returns[:5]} last={portfolio_returns[-5:] if len(portfolio_returns) > 5 else portfolio_returns}")
            self.Log(f"[metrics debug] benchmark_returns len={len(benchmark_returns)} sample={benchmark_returns[:5]} last={benchmark_returns[-5:] if len(benchmark_returns) > 5 else benchmark_returns}")
            self.Log(f"[metrics debug] nav_timeline len={len(nav_timeline)} sample={nav_timeline[:5]} last={nav_timeline[-5:] if len(nav_timeline) > 5 else nav_timeline}")
            # Log types and suspicious conditions
            if len(nav_timeline) > 0:
                self.Log(f"[metrics debug] nav_timeline types: {[type(x) for x in nav_timeline[:5]]} ... {[type(x) for x in nav_timeline[-5:]]}")
            if len(total_values) != len(nav_timeline):
                self.Log(f"[metrics debug] WARNING: total_values and nav_timeline length mismatch: {len(total_values)} vs {len(nav_timeline)}")
            if len(portfolio_returns) != len(nav_timeline):
                self.Log(f"[metrics debug] WARNING: portfolio_returns and nav_timeline length mismatch: {len(portfolio_returns)} vs {len(nav_timeline)}")
            if len(benchmark_returns) != len(nav_timeline):
                self.Log(f"[metrics debug] WARNING: benchmark_returns and nav_timeline length mismatch: {len(benchmark_returns)} vs {len(nav_timeline)}")
            # Log if any array is all zeros or constant
            if len(total_values) > 1 and all(x == total_values[0] for x in total_values):
                self.Log(f"[metrics debug] WARNING: total_values is constant: {total_values[0]}")
            if len(portfolio_returns) > 1 and all(x == portfolio_returns[0] for x in portfolio_returns):
                self.Log(f"[metrics debug] WARNING: portfolio_returns is constant: {portfolio_returns[0]}")
            if len(benchmark_returns) > 1 and all(x == benchmark_returns[0] for x in benchmark_returns):
                self.Log(f"[metrics debug] WARNING: benchmark_returns is constant: {benchmark_returns[0]}")
        periods_to_print = ['YTD', '1YR', '3YR', '5YR', '10YR', 'FULL']
        header = (
            "Period | StartDate | EndDate | CAGR | BenchReturn | MaxDrawdown | STDEV | TrackingError | BenchmarkDrawdown | BenchSD | Sharpe | Sortino | Alpha | Beta | InfoRatio | UpCapture | DownCapture"
        )
        self.Log(header)
        any_metrics = False
        rf_daily = 0.0  # risk-free rate, can be parameterized
        window_days = {'1YR': 252, '3YR': 252 * 3, '5YR': 252 * 5, '10YR': 252 * 10}
        for label in periods_to_print:
            if label in window_days:
                # Trailing windows; numpy slicing returns the full array when shorter than `days`
                days = window_days[label]
                t_val = total_values[-days:]
                p_ret = portfolio_returns[-days:]
                b_ret = benchmark_returns[-days:]
                period_timeline = nav_timeline[-days:]
            elif label == 'YTD':
                # Slice from the first observation in the current calendar year
                # (previously YTD duplicated FULL by using the whole history)
                start_idx = 0
                try:
                    last_year = str(nav_timeline[-1])[:4]
                    for i, t in enumerate(nav_timeline):
                        if str(t)[:4] == last_year:
                            start_idx = i
                            break
                except Exception:
                    start_idx = 0
                t_val = total_values[start_idx:]
                p_ret = portfolio_returns[start_idx:]
                b_ret = benchmark_returns[start_idx:]
                period_timeline = nav_timeline[start_idx:]
            elif label == 'FULL':
                t_val = total_values
                p_ret = portfolio_returns
                b_ret = benchmark_returns
                period_timeline = nav_timeline
            else:
                continue
            years = len(t_val) / 252.0 if len(t_val) > 0 else 0
            if len(t_val) < 2 or len(p_ret) < 2 or len(b_ret) < 2 or len(period_timeline) < 2:
                continue
            start_date = str(period_timeline[0])[:10] if len(period_timeline) > 0 else ''
            end_date = str(period_timeline[-1])[:10] if len(period_timeline) > 0 else ''
            try:
                maxdd = calculate_max_drawdown(t_val)
                bench_nav = [1.0]
                for r in b_ret:
                    bench_nav.append(bench_nav[-1] * (1.0 + r))
                bench_nav = bench_nav[1:]
                bench_maxdd = calculate_max_drawdown(bench_nav)
            except Exception:
                maxdd = ''
                bench_maxdd = ''
            try:
                cagr = (t_val[-1] / t_val[0]) ** (1.0 / years) - 1.0 if t_val[0] > 0 and years > 0 else ''
            except Exception:
                cagr = ''
            try:
                bench_return = (bench_nav[-1] / bench_nav[0]) ** (1.0 / years) - 1.0 if bench_nav[0] > 0 and years > 0 else ''
            except Exception:
                bench_return = ''
            try:
                stdev = annualized_stdev(p_ret)
            except Exception:
                stdev = ''
            try:
                track_err = tracking_error(p_ret, b_ret)
            except Exception:
                track_err = ''
            try:
                bench_sd = annualized_stdev(b_ret)
            except Exception:
                bench_sd = ''
            try:
                sharpe = sharpe_ratio(p_ret, rf_daily)
            except Exception:
                sharpe = ''
            try:
                sortino = sortino_ratio(p_ret, rf_daily)
            except Exception:
                sortino = ''
            try:
                alpha, beta = alpha_beta(p_ret, b_ret, rf_daily)
            except Exception:
                alpha, beta = '', ''
            try:
                info = information_ratio(p_ret, b_ret)
            except Exception:
                info = ''
            try:
                upcap = up_capture(p_ret, b_ret)
            except Exception:
                upcap = ''
            try:
                downcap = down_capture(p_ret, b_ret)
            except Exception:
                downcap = ''
            row = (
                f"{label} | {start_date} | {end_date} | {fmt(cagr)} | {fmt(bench_return)} | {fmt(maxdd)} | {fmt(stdev)} | {fmt(track_err)} | {fmt(bench_maxdd)} | {fmt(bench_sd)} | {fmt(sharpe)} | {fmt(sortino)} | {fmt(alpha)} | {fmt(beta)} | {fmt(info)} | {fmt(upcap)} | {fmt(downcap)}"
            )
            self.Log(row)
            any_metrics = True
        if not any_metrics:
            self.Log("[metrics debug] No metrics rows printed: check data population and update logic.")
    except Exception as e:
        if hasattr(self, 'Time') and hasattr(self, 'StartDate') and self.Time >= self.StartDate:
            self.Log(f"[metrics debug] Exception in print_final_metrics: {e}")

import numpy as np

def calculate_max_drawdown(values):
    if values is None or len(values) < 2:
        return ''
    peak = values[0]
    max_dd = 0.0
    for v in values:
        if v > peak:
            peak = v
        dd = (peak - v) / peak if peak > 0 else 0.0
        if dd > max_dd:
            max_dd = dd
    return max_dd

def calculate_cagr(t_val, years):
    try:
        return (t_val[-1] / t_val[0]) ** (1.0 / years) - 1.0 if t_val[0] > 0 and years > 0 else ''
    except Exception:
        return ''


def annualized_stdev(returns):
    try:
        return np.std(returns, ddof=1) * np.sqrt(252) if len(returns) > 1 else ''
    except Exception:
        return ''

def tracking_error(p_ret, b_ret):
    try:
        return np.std(np.array(p_ret) - np.array(b_ret), ddof=1) * np.sqrt(252) if len(p_ret) > 1 else ''
    except Exception:
        return ''

def sharpe_ratio(p_ret, rf_daily):
    try:
        excess = np.array(p_ret) - rf_daily
        mean_excess = np.mean(excess)
        std_excess = np.std(excess, ddof=1)
        return np.sqrt(252) * mean_excess / std_excess if len(p_ret) > 1 and std_excess > 0 else ''
    except Exception:
        return ''

def sortino_ratio(p_ret, rf_daily):
    try:
        excess = np.array(p_ret) - rf_daily
        downside = excess[excess < 0]
        mean_excess = np.mean(excess)
        std_downside = np.std(downside, ddof=1)
        return np.sqrt(252) * mean_excess / std_downside if len(downside) > 0 and std_downside > 0 else ''
    except Exception:
        return ''

def alpha_beta(p_ret, b_ret, rf_daily):
    try:
        if len(p_ret) > 1 and len(b_ret) > 1:
            y_alpha = np.array(p_ret) - rf_daily
            x_alpha = np.array(b_ret) - rf_daily
            X_alpha = np.vstack([np.ones(len(x_alpha)), x_alpha]).T
            coeffs_alpha = np.linalg.lstsq(X_alpha, y_alpha, rcond=None)[0]
            alpha = coeffs_alpha[0] * 252
            x = np.array(b_ret)
            y = np.array(p_ret)
            if np.std(x, ddof=1) > 0:
                beta = np.cov(y, x)[0, 1] / np.var(x, ddof=1)
            else:
                beta = ''
        else:
            alpha = ''
            beta = ''
        return alpha, beta
    except Exception:
        return '', ''

def information_ratio(p_ret, b_ret):
    try:
        active = np.array(p_ret) - np.array(b_ret)
        mean_active = np.mean(active)
        std_active = np.std(active, ddof=1)
        return np.sqrt(252) * mean_active / std_active if len(active) > 1 and std_active > 0 else ''
    except Exception:
        return ''

def up_capture(p_ret, b_ret):
    try:
        up_mask = np.array(b_ret) > 0
        if np.any(up_mask):
            up_port = np.array(p_ret)[up_mask]
            up_bench = np.array(b_ret)[up_mask]
            return np.mean(up_port) / np.mean(up_bench) if np.mean(up_bench) != 0 else ''
        else:
            return ''
    except Exception:
        return ''

def down_capture(p_ret, b_ret):
    try:
        down_mask = np.array(b_ret) < 0
        if np.any(down_mask):
            down_port = np.array(p_ret)[down_mask]
            down_bench = np.array(b_ret)[down_mask]
            return np.mean(down_port) / np.mean(down_bench) if np.mean(down_bench) != 0 else ''
        else:
            return ''
    except Exception:
        return ''

def fmt(x):
    try:
        return f"{float(x):.4f}"
    except Exception:
        return ''
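
# A quick self-check of the metric helpers on synthetic data; a sketch for local
# runs only (nothing calls it), with all inputs hypothetical.
def _self_check_metrics() -> None:
    """Print each helper's output for ~2 years of fake daily returns."""
    rng = np.random.default_rng(0)
    p_ret = 0.0004 + 0.01 * rng.standard_normal(504)  # hypothetical portfolio returns
    b_ret = 0.0003 + 0.01 * rng.standard_normal(504)  # hypothetical benchmark returns
    nav = 100000.0 * np.cumprod(1.0 + p_ret)
    print("maxdd :", fmt(calculate_max_drawdown(nav)))
    print("cagr  :", fmt(calculate_cagr(nav, len(nav) / 252.0)))
    print("stdev :", fmt(annualized_stdev(p_ret)))
    print("sharpe:", fmt(sharpe_ratio(p_ret, 0.0)))
    print("alpha/beta:", [fmt(x) for x in alpha_beta(p_ret, b_ret, 0.0)])
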
# Order helpers: build and submit MOC baskets
from typing import Any, Dict, Iterable, List, Tuple

try:
    from AlgorithmImports import *  # type: ignore
except Exception:
    OrderDirection = None  # type: ignore
    OrderTag = str


OrderSpec = Tuple[Any, int, str]  # (symbol, quantity, tag)


def build_moc_basket(algo: Any, target_deltas: Dict[Any, int]) -> Tuple[List[OrderSpec], List[OrderSpec]]:
    sells: List[OrderSpec] = []
    buys: List[OrderSpec] = []
    for sym, qty in target_deltas.items():
        tag = "MOC Rebalance"
        if qty < 0:
            sells.append((sym, abs(qty), tag))
        elif qty > 0:
            buys.append((sym, qty, tag))
    # Important: sell orders first, then buys
    return sells, buys


def submit_moc_basket(algo: Any, sells: List[OrderSpec], buys: List[OrderSpec]) -> None:
    # Guard against submitting orders during warmup (unless explicitly allowed)
    try:
        if getattr(algo, 'IsWarmingUp', False) and not getattr(algo, 'ALLOW_TRADES_DURING_WARMUP', False):
            try:
                algo.Debug("[Orders] Skipping MOC basket submission during warmup")
            except Exception:
                pass
            return
    except Exception:
        pass
    # Submit sells first
    for sym, qty, tag in sells:
        try:
            q = int(qty)
            if q == 0:
                continue
            # Coerce ticker string to Symbol if needed
            sym_obj = sym
            try:
                if isinstance(sym, str):
                    sym_obj = algo.Symbol(sym)
            except Exception:
                sym_obj = sym
            algo.MarketOnCloseOrder(sym_obj, -q, tag=tag)
        except Exception as e:
            algo.Debug(f"[MOC SELL REJECTED] {sym} qty={qty} err={e}")
    # Then buys
    for sym, qty, tag in buys:
        try:
            q = int(qty)
            if q == 0:
                continue
            sym_obj = sym
            try:
                if isinstance(sym, str):
                    sym_obj = algo.Symbol(sym)
            except Exception:
                sym_obj = sym
            algo.MarketOnCloseOrder(sym_obj, q, tag=tag)
        except Exception as e:
            algo.Debug(f"[MOC BUY REJECTED] {sym} qty={qty} err={e}")
from typing import Any, Dict, List
from datetime import datetime
import gzip
import base64
import csv
from io import StringIO

# EOM performance reporting and lifecycle tracking helpers

TRUTHY = {"1", "true", "yes", "y", "on"}


def configure_perf_reporting(algo: Any) -> None:
    """Parse EOM_SNAPSHOT and PERF_EXPORT_CSV parameters, init ledgers, schedule EOM snapshot, and prepare CSV buffers."""
    try:
        # Parse QC Parameter (truthy to enable)
        enabled = bool(getattr(algo, 'EOM_SNAPSHOT_ENABLED', False))
        try:
            raw = algo.GetParameter("EOM_SNAPSHOT")
            if raw is not None and str(raw).strip() != "":
                enabled = str(raw).strip().lower() in TRUTHY
        except Exception:
            pass
        algo.EOM_SNAPSHOT_ENABLED = enabled
        # Parse CSV export toggle
        export_csv = bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False))
        try:
            raw2 = algo.GetParameter("PERF_EXPORT_CSV") or algo.GetParameter("LEDGER_EXPORT")
            if raw2 is not None and str(raw2).strip() != "":
                export_csv = str(raw2).strip().lower() in TRUTHY
        except Exception:
            pass
        algo.PERF_EXPORT_CSV_ENABLED = export_csv
        # Optional: write 'latest' convenience file alongside timestamped key (default ON)
        try:
            raw_latest = algo.GetParameter("PERF_CSV_WRITE_LATEST")
            algo.PERF_CSV_WRITE_LATEST = (str(raw_latest).strip().lower() in TRUTHY) if (raw_latest is not None and str(raw_latest).strip() != "") else True
        except Exception:
            algo.PERF_CSV_WRITE_LATEST = True
        # Optional early CSV preview controls (log a small head preview early in the run)
        try:
            raw_prev = algo.GetParameter("PERF_CSV_PREVIEW_EARLY")
            algo.PERF_CSV_PREVIEW_EARLY = (str(raw_prev).strip().lower() in TRUTHY) if (raw_prev is not None and str(raw_prev).strip() != "") else False
        except Exception:
            algo.PERF_CSV_PREVIEW_EARLY = False
        try:
            raw_head_n = algo.GetParameter("PERF_CSV_PREVIEW_HEAD_N")
            algo.PERF_CSV_PREVIEW_HEAD_N = int(float(str(raw_head_n).strip())) if (raw_head_n is not None and str(raw_head_n).strip() != "") else 25
        except Exception:
            algo.PERF_CSV_PREVIEW_HEAD_N = 25
        # Initialize ledgers (idempotent)
        if not hasattr(algo, '_eom_liquidated'):
            algo._eom_liquidated = []
        if not hasattr(algo, '_last_eom_month'):
            algo._last_eom_month = None
        if not hasattr(algo, '_ledger_opens'):
            algo._ledger_opens = []
        if not hasattr(algo, '_ledger_closes'):
            algo._ledger_closes = []
        if not hasattr(algo, '_ledger_eom_positions'):
            algo._ledger_eom_positions = []
        # CSV line accumulator
        if not hasattr(algo, '_perf_csv_rows'):
            algo._perf_csv_rows = []  # list of dict rows
        # Daily liquidation tracking (for daily POS snapshot)
        if not hasattr(algo, '_day_liquidated'):
            algo._day_liquidated = []
        if not hasattr(algo, '_last_day_key'):
            algo._last_day_key = None
        # Daily snapshot toggle (prints all positions each trade date at market close)
        try:
            raw_daily = algo.GetParameter("DAILY_SNAPSHOT")
            algo.DAILY_SNAPSHOT_ENABLED = (str(raw_daily).strip().lower() in TRUTHY) if (raw_daily is not None and str(raw_daily).strip() != "") else False
        except Exception:
            algo.DAILY_SNAPSHOT_ENABLED = False
        # One-time guard for early preview logging
        if not hasattr(algo, '_csv_preview_printed_early'):
            algo._csv_preview_printed_early = False
        # Schedule monthly EOM snapshot (after market close +5m)
        if enabled:
            try:
                # Require benchmark proxy (spy) to drive DateRules/TimeRules;
                # unwrap .Symbol so either a Security or a Symbol works
                if hasattr(algo, 'spy') and algo.spy is not None:
                    spy_sym = getattr(algo.spy, 'Symbol', algo.spy)
                    algo.Schedule.On(
                        algo.DateRules.MonthEnd(spy_sym),
                        algo.TimeRules.AfterMarketClose(spy_sym, 5),
                        lambda: eom_snapshot(algo)
                    )
            except Exception:
                pass
        # Schedule daily snapshot (exact market close) if enabled
        if bool(getattr(algo, 'DAILY_SNAPSHOT_ENABLED', False)):
            try:
                if hasattr(algo, 'spy') and algo.spy is not None:
                    spy_sym = getattr(algo.spy, 'Symbol', algo.spy)
                    algo.Schedule.On(
                        algo.DateRules.EveryDay(spy_sym),
                        algo.TimeRules.AfterMarketClose(spy_sym, 0),
                        lambda: daily_snapshot(algo)
                    )
            except Exception:
                pass
        # One-line visibility into CSV settings
        try:
            algo.Log(
                f"[CSV] Export={'ON' if bool(getattr(algo,'PERF_EXPORT_CSV_ENABLED', False)) else 'OFF'}; "
                f"PreviewEarly={'ON' if bool(getattr(algo,'PERF_CSV_PREVIEW_EARLY', False)) else 'OFF'}; "
                f"HeadN={int(getattr(algo,'PERF_CSV_PREVIEW_HEAD_N', 25) or 25)}; "
                f"WriteLatest={'ON' if bool(getattr(algo,'PERF_CSV_WRITE_LATEST', True)) else 'OFF'}; "
                f"DumpFull={'ON' if bool(getattr(algo,'PERF_CSV_DUMP_FULL', False)) else 'OFF'}; "
                f"DumpChunkLines={int(getattr(algo,'PERF_CSV_DUMP_CHUNK_LINES', 0) or 0)}"
            )
            if not bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                algo.Log("[CSV] Export is OFF. Enable with PERF_EXPORT_CSV=1 (alias: LEDGER_EXPORT=1)")
        except Exception:
            pass
    except Exception:
        pass
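

# Typical Initialize wiring (a sketch; parameter names are exactly those parsed above):
#
#     self.spy = self.AddEquity("SPY", Resolution.Daily)  # scheduling reference
#     configure_perf_reporting(self)
#
# then set EOM_SNAPSHOT=1 and PERF_EXPORT_CSV=1 in the project parameters to enable
# the monthly snapshot schedule and the ObjectStore CSV export.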


def execution_snapshot(algo: Any) -> None:
    """Emit an execution snapshot using current portfolio holdings, if enabled by EXEC_SNAPSHOT_LOG flag."""
    try:
        # Only log if EXEC_SNAPSHOT_LOG is enabled (QC Parameter)
        raw_exec_log = getattr(algo, 'EXEC_SNAPSHOT_LOG', None)
        if raw_exec_log is None:
            raw_exec_log = algo.GetParameter("EXEC_SNAPSHOT_LOG") if hasattr(algo, 'GetParameter') else None
        exec_log_enabled = str(raw_exec_log).strip().lower() in ("1","true","yes","y","on")
        # Allow execution snapshot rows to be captured for CSV even if EOM_SNAPSHOT is disabled.
        if not exec_log_enabled:
            return
        pv = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0)) if hasattr(algo, 'Portfolio') else 0.0
        lines: List[str] = []
        try:
            for h in getattr(algo, 'Portfolio', {}).Values:
                try:
                    if getattr(h, 'Invested', False) and abs(float(getattr(h, 'Quantity', 0))) > 0:
                        sym = getattr(getattr(h, 'Symbol', None), 'Value', None)
                        qty = float(getattr(h, 'Quantity', 0))
                        price = float(getattr(h, 'Price', 0))
                        mv = float(getattr(h, 'HoldingsValue', 0))
                        weight = (mv / pv) if pv > 0 else 0.0
                        lines.append(f"{sym} | Qty: {qty:.0f} | Weight: {weight:.4f} | MOC Price: {price:.2f}")
                        if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                            try:
                                algo._perf_csv_rows.append({
                                    'event_type': 'EXEC_SNAPSHOT',
                                    'time': getattr(algo, 'Time', None),
                                    'symbol': sym,
                                    'qty': qty,
                                    'weight': weight,
                                    'price': price,
                                    'mv': mv,
                                    'action': '',
                                    'fill_qty': '',
                                    'fill_price': '',
                                    'total_value': pv,
                                })
                            except Exception:
                                pass
                except Exception:
                    continue
        except Exception:
            pass
        if lines:
            algo.Log(f"EXECUTION SNAPSHOT | {algo.Time:%Y-%m-%d}")
            for ln in lines[:40]:
                algo.Log(ln)
            if len(lines) > 40:
                algo.Log(f"... ({len(lines)-40} more)")
        # Optional: print a small CSV head preview early to ensure visibility before heavier logs
        try:
            if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)) and bool(getattr(algo, 'PERF_CSV_PREVIEW_EARLY', False)) and not bool(getattr(algo, '_csv_preview_printed_early', False)):
                _log_csv_preview_head(algo)
                algo._csv_preview_printed_early = True
        except Exception:
            pass
    except Exception:
        pass


def on_order_event(algo: Any, orderEvent: Any) -> None:
    """Track opens and closes with fill timestamps and prices; also append to monthly liquidation list."""
    try:
        # Capture fills for CSV export even when EOM snapshots are disabled.
        if not bool(getattr(algo, 'EOM_SNAPSHOT_ENABLED', False)) and not bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
            return
        # If requested, emit an early CSV preview as soon as we see the first fill
        try:
            if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)) and bool(getattr(algo, 'PERF_CSV_PREVIEW_EARLY', False)) and not bool(getattr(algo, '_csv_preview_printed_early', False)):
                _log_csv_preview_head(algo)
                algo._csv_preview_printed_early = True
        except Exception:
            pass
        # Only process fills; compare against the Lean enum when available,
        # falling back to a string check for local runs
        status = getattr(orderEvent, 'Status', None)
        try:
            is_filled = (status == OrderStatus.Filled)
        except Exception:
            is_filled = False
        if not is_filled and str(status) != 'Filled':
            return
        sym = getattr(orderEvent, 'Symbol', None)
        if sym is None:
            return
        try:
            holding = algo.Portfolio[sym]
            qty_after = float(getattr(holding, 'Quantity', 0)) if holding is not None else None
        except Exception:
            qty_after = None
        try:
            fill_qty = float(getattr(orderEvent, 'FillQuantity', 0.0))
        except Exception:
            fill_qty = 0.0
        qty_before = None
        try:
            if qty_after is not None and fill_qty is not None:
                qty_before = float(qty_after) - float(fill_qty)
        except Exception:
            pass
        ev_time = getattr(orderEvent, 'UtcTime', None) or getattr(orderEvent, 'Time', None) or getattr(algo, 'Time', None)
        sym_val = getattr(sym, 'Value', str(sym))
        fill_price = float(getattr(orderEvent, 'FillPrice', 0.0))
        # Open: 0 -> non-zero
        try:
            if qty_before is not None and abs(qty_before) == 0 and qty_after is not None and abs(qty_after) > 0 and abs(fill_qty) > 0:
                algo._ledger_opens.append({'time': ev_time, 'symbol': sym_val, 'fill_qty': fill_qty, 'fill_price': fill_price})
                if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                    try:
                        algo._perf_csv_rows.append({
                            'event_type': 'OPEN',
                            'time': ev_time,
                            'symbol': sym_val,
                            'qty': qty_after,
                            'weight': '',
                            'price': fill_price,
                            'mv': '',
                            'action': 'OPEN',
                            'fill_qty': fill_qty,
                            'fill_price': fill_price,
                            'total_value': getattr(algo.Portfolio, 'TotalPortfolioValue', ''),
                        })
                    except Exception:
                        pass
        except Exception:
            pass
        # Close: -> 0
        if qty_after == 0 and abs(fill_qty) != 0:
            try:
                algo._ledger_closes.append({'time': ev_time, 'symbol': sym_val, 'fill_qty': fill_qty, 'fill_price': fill_price})
            except Exception:
                pass
            try:
                algo._eom_liquidated.append({'symbol': sym_val, 'time': ev_time, 'fill_price': fill_price, 'fill_qty': fill_qty})
            except Exception:
                pass
            try:
                # Track for same-day liquidation reporting
                algo._day_liquidated.append({'symbol': sym_val, 'time': ev_time, 'fill_price': fill_price, 'fill_qty': fill_qty})
            except Exception:
                pass
            if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                try:
                    algo._perf_csv_rows.append({
                        'event_type': 'CLOSE',
                        'time': ev_time,
                        'symbol': sym_val,
                        'qty': 0,
                        'weight': '',
                        'price': fill_price,
                        'mv': '',
                        'action': 'CLOSE',
                        'fill_qty': fill_qty,
                        'fill_price': fill_price,
                        'total_value': getattr(algo.Portfolio, 'TotalPortfolioValue', ''),
                    })
                except Exception:
                    pass
    except Exception:
        pass
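

# Wiring sketch: delegate the algorithm's order-event hook to this helper so fills
# are captured into the open/close ledgers and CSV rows:
#
#     def OnOrderEvent(self, orderEvent):
#         on_order_event(self, orderEvent)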


def eom_snapshot(algo: Any) -> None:
    """Log EOM positions, log liquidations, and store a full snapshot for end-of-run reporting."""
    # Print CSV header once per run, and ensure log lines match CSV exactly
    header_printed = getattr(algo, '_eom_csv_header_printed', False)
    # Accumulate EOM rows for ObjectStore CSV
    if not hasattr(algo, "_eom_perf_rows"):
        algo._eom_perf_rows = []
    try:
        # Verbose EOM log toggle (default OFF, enable with QC Parameter EOM_VERBOSE_LOG)
        raw_eom_verbose = getattr(algo, 'EOM_VERBOSE_LOG', None)
        if raw_eom_verbose is None:
            raw_eom_verbose = algo.GetParameter("EOM_VERBOSE_LOG") if hasattr(algo, 'GetParameter') else None
        eom_verbose_enabled = str(raw_eom_verbose).strip().lower() in ("1","true","yes","y","on")
        # Ensure monthly snapshot runs once per month
        try:
            dt = algo.Time
            month_key = (dt.year, dt.month)
            if getattr(algo, '_last_eom_month', None) == month_key:
                return
            algo._last_eom_month = month_key
        except Exception:
            pass
        # Early CSV preview (once) before printing detailed EOM data
        try:
            if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)) and bool(getattr(algo, 'PERF_CSV_PREVIEW_EARLY', False)) and not bool(getattr(algo, '_csv_preview_printed_early', False)):
                _log_csv_preview_head(algo)
                algo._csv_preview_printed_early = True
        except Exception:
            pass
        # Gather holdings
        held = []
        try:
            for h in getattr(algo, 'Portfolio', {}).Values:
                try:
                    if getattr(h, 'Invested', False) and abs(float(getattr(h, 'Quantity', 0))) > 0:
                        sym = getattr(getattr(h, 'Symbol', None), 'Value', None)
                        qty = float(getattr(h, 'Quantity', 0))
                        price = float(getattr(h, 'Price', 0))
                        mval = float(getattr(h, 'HoldingsValue', 0))
                        held.append((sym, qty, price, mval))
                except Exception:
                    continue
        except Exception:
            pass
        held.sort(key=lambda x: abs(x[3]), reverse=True)
        total_val = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0)) if hasattr(algo, 'Portfolio') else 0.0
        if eom_verbose_enabled:
            algo.Log(f"EOM POSITIONS | {algo.Time:%Y-%m-%d}")
            for sym, qty, price, mval in held[:25]:
                try:
                    weight = (mval / total_val) if total_val > 0 else 0.0
                    algo.Log(f"{sym} | Qty: {qty:.0f} | Weight: {weight:.4f} | Close: {price:.2f} | MV: {mval:.2f}")
                    if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                        try:
                            algo._perf_csv_rows.append({
                                'event_type': 'EOM_POSITION',
                                'time': getattr(algo, 'Time', None),
                                'symbol': sym,
                                'qty': qty,
                                'weight': weight,
                                'price': price,
                                'mv': mval,
                                'action': '',
                                'fill_qty': '',
                                'fill_price': '',
                                'total_value': total_val,
                            })
                        except Exception:
                            pass
                except Exception:
                    pass
            if len(held) > 25:
                algo.Log(f"... ({len(held) - 25} more)")
        # Persist alphabetical snapshot
        try:
            pos_alpha: List[Dict[str, float]] = []
            for sym, qty, price, mval in held:
                weight = (mval / total_val) if total_val > 0 else 0.0
                pos_alpha.append({'symbol': sym, 'qty': float(qty), 'weight': float(weight), 'close': float(price), 'mv': float(mval)})
            pos_alpha.sort(key=lambda r: (r.get('symbol') or ''))
            algo._ledger_eom_positions.append({'time': algo.Time, 'positions': pos_alpha, 'total_value': float(total_val)})
        except Exception:
            pass
        # Liquidations
        try:
            liq = list(getattr(algo, '_eom_liquidated', []))
        except Exception:
            liq = []
        # Verbose EOM liquidations log toggle (default OFF, enable with QC Parameter EOM_VERBOSE_LOG)
        if liq and eom_verbose_enabled:
            algo.Log(f"EOM LIQUIDATIONS | {algo.Time:%Y-%m-%d} | Count: {len(liq)}")
            for it in liq[:25]:
                try:
                    tstamp = it.get('time')
                    sym = it.get('symbol')
                    fq = it.get('fill_qty')
                    fp = it.get('fill_price')
                    algo.Log(f"{sym} | FillQty: {fq:.0f} | FillPrice: {fp:.2f} | Time: {tstamp}")
                    if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                        try:
                            algo._perf_csv_rows.append({
                                'event_type': 'EOM_LIQUIDATION',
                                'time': tstamp,
                                'symbol': sym,
                                'qty': '',
                                'weight': '',
                                'price': fp,
                                'mv': '',
                                'action': 'LIQUIDATION',
                                'fill_qty': fq,
                                'fill_price': fp,
                                'total_value': total_val,
                            })
                        except Exception:
                            pass
                except Exception:
                    pass
            if len(liq) > 25:
                algo.Log(f"... ({len(liq) - 25} more)")

        # Unified EOM CSV-style log lines for external performance reporting (requested format)
        # Format: date,portfolio_id,symbol,weight,qty,price
        try:
            date_str = algo.Time.strftime('%Y-%m-%d') if hasattr(algo, 'Time') else ''
            portfolio_id = getattr(algo, 'PERF_PORTFOLIO_ID', 'MP')
            emitted = set()
            # Active holdings lines
            # Print header immediately before first EOM line if not already printed
            if not header_printed and held:
                algo.Log('date,portfolio_id,symbol,weight,qty,price,mv,total_value')
                algo._eom_csv_header_printed = True
                header_printed = True
            for sym, qty, price, mval in held:
                try:
                    date_fmt = date_str  # already %Y-%m-%d; the former strptime round-trip was a no-op
                    weight = (mval / total_val) if total_val > 0 else 0.0
                    mv = qty * price
                    total_value = total_val
                    line = f"{date_fmt},{portfolio_id},{sym},{weight:.4f},{int(qty)},{price:.2f},{mv:.2f},{total_value:.2f}"
                    algo.Log(line)
                    emitted.add(sym)
                    algo._eom_perf_rows.append({
                        "date": date_fmt,
                        "portfolio_id": portfolio_id,
                        "symbol": sym,
                        "weight": weight,
                        "qty": int(qty),
                        "price": price,
                        "mv": mv,
                        "total_value": total_value
                    })
                except Exception:
                    continue
            # Liquidated lines (only if not currently held)
            for it in liq:
                try:
                    sym = it.get('symbol')
                    if sym in emitted:
                        continue
                    fp = it.get('fill_price')
                    date_fmt = date_str  # already %Y-%m-%d
                    mv = 0.0
                    total_value = total_val
                    line = f"{date_fmt},{portfolio_id},{sym},0.0000,0,{fp:.2f},{mv:.2f},{total_value:.2f}"
                    algo.Log(line)
                    algo._eom_perf_rows.append({
                        "date": date_fmt,
                        "portfolio_id": portfolio_id,
                        "symbol": sym,
                        "weight": 0.0,
                        "qty": 0,
                        "price": fp,
                        "mv": mv,
                        "total_value": total_value
                    })
                except Exception:
                    continue
            # Benchmark line (BM) with close price; weight and qty fields left blank
            try:
                bm_symbol_obj = getattr(algo, 'BENCHMARK_SYMBOL', None)
                bm_sym = getattr(bm_symbol_obj, 'Value', None) if bm_symbol_obj is not None else None
                bm_price = None
                if bm_symbol_obj is not None:
                    sec = algo.Securities.get(bm_symbol_obj, None)
                    if sec is not None:
                        bm_price = float(getattr(sec, 'Price', 0.0))
                if bm_sym is not None and bm_price is not None:
                    date_fmt = date_str  # already %Y-%m-%d
                    mv = ""
                    total_value = total_val
                    bm_line = f"{date_fmt},BM,{bm_sym},,,{bm_price:.2f},{mv},{total_value:.2f}"
                    algo.Log(bm_line)
                    algo._eom_perf_rows.append({
                        "date": date_fmt,
                        "portfolio_id": "BM",
                        "symbol": bm_sym,
                        "weight": None,
                        "qty": None,
                        "price": bm_price,
                        "mv": mv,
                        "total_value": total_value
                    })
            except Exception:
                pass
        except Exception:
            pass
        # Reset monthly liquidations
        algo._eom_liquidated = []
        # Reset day liquidations if month boundary coincides with day snapshot reset
        algo._day_liquidated = []
    except Exception:
        # Outer guard: never let an EOM snapshot failure stop the algorithm
        pass
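

# Sample emitted holding line (hypothetical values), matching the header
# 'date,portfolio_id,symbol,weight,qty,price,mv,total_value' printed above:
#
#     2015-06-30,MP,SPY,0.2513,1200,205.85,247020.00,982900.00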


def daily_snapshot(algo: Any) -> None:
    """Log all current positions at market close (daily) plus any symbols liquidated during the day.

    Format lines (one per symbol):
        POS,YYYY-MM-DD,<portfolio_id>,<symbol>,<weight>,<quantity>,<close_price>
    Liquidated during day (not currently held): weight=0.0000, quantity=0, price=last fill price.
    Controlled by DAILY_SNAPSHOT parameter (truthy -> enabled).
    """
    try:
        if not bool(getattr(algo, 'DAILY_SNAPSHOT_ENABLED', False)):
            return
        # Guard: run once per date
        try:
            dt = getattr(algo, 'Time', None)
            day_key = (dt.year, dt.month, dt.day) if hasattr(dt, 'year') else None
            if day_key is not None and getattr(algo, '_last_day_key', None) == day_key:
                return
            algo._last_day_key = day_key
        except Exception:
            pass
        # Gather holdings
        held = []
        total_val = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0)) if hasattr(algo, 'Portfolio') else 0.0
        try:
            for h in getattr(algo, 'Portfolio', {}).Values:
                try:
                    if getattr(h, 'Invested', False) and abs(float(getattr(h, 'Quantity', 0))) > 0:
                        sym = getattr(getattr(h, 'Symbol', None), 'Value', None)
                        qty = float(getattr(h, 'Quantity', 0))
                        price = float(getattr(h, 'Price', 0))
                        mval = float(getattr(h, 'HoldingsValue', 0))
                        held.append((sym, qty, price, mval))
                except Exception:
                    continue
        except Exception:
            pass
        held.sort(key=lambda x: abs(x[3]), reverse=True)
        # Liquidations captured today
        try:
            day_liq = list(getattr(algo, '_day_liquidated', []))
        except Exception:
            day_liq = []
        date_str = getattr(algo, 'Time').strftime('%Y-%m-%d') if hasattr(getattr(algo, 'Time'), 'strftime') else ''
        portfolio_id = getattr(algo, 'PERF_PORTFOLIO_ID', 'MP')
        emitted = set()
        # Active positions -> uniform EOM-style daily lines
        for sym, qty, price, mval in held:
            try:
                weight = (mval / total_val) if total_val > 0 else 0.0
                line = f"EOM,{date_str},{portfolio_id},{sym},{weight:.4f},{qty:.8f},{price:.2f}"
                algo.Log(line)
                emitted.add(sym)
            except Exception:
                continue
        # Liquidated positions (not currently held) -> weight=0, qty=0, last fill price
        for it in day_liq:
            try:
                sym = it.get('symbol')
                if sym in emitted:
                    continue
                fp = it.get('fill_price')
                line = f"EOM,{date_str},{portfolio_id},{sym},0.0000,0,{fp:.2f}"
                algo.Log(line)
            except Exception:
                continue
        # Benchmark line (BM) with close price, blank weight & quantity fields
        try:
            bm_sym_obj = getattr(getattr(algo, 'spy', None), 'Symbol', None)
            bm_sym = getattr(bm_sym_obj, 'Value', None)
            if bm_sym:
                bm_price = float(getattr(getattr(algo, 'spy', None), 'Price', 0.0))
                bm_line = f"EOM,{date_str},BM,{bm_sym},,,{bm_price:.2f}"
                algo.Log(bm_line)
        except Exception:
            pass
        # Reset day liquidations for next day
        algo._day_liquidated = []
    except Exception:
        pass


def _timestamp_key(prefix: str = 'perf/perf_', dt: Any = None) -> str:
    try:
        # Prefer the supplied (algorithm) time for deterministic keys; fall back to UTC now
        t = dt if hasattr(dt, 'strftime') else datetime.utcnow()
        return f"{prefix}{t:%Y%m%d_%H%M%S}.csv"
    except Exception:
        # Fallback to epoch-based suffix
        try:
            return f"{prefix}{int(datetime.utcnow().timestamp())}.csv"
        except Exception:
            return f"{prefix}latest.csv"


def print_end_of_algorithm_perf(algo: Any) -> None:
    """Print a consolidated, chronological timeline of opens, closes, and EOM snapshots."""
    try:
        # If neither EOM snapshot nor CSV export is enabled, skip.
        if (not bool(getattr(algo, 'EOM_SNAPSHOT_ENABLED', False))) and (not bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False))):
            return
        # If requested and not already printed, log a small CSV head preview first
        try:
            if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)) and bool(getattr(algo, 'PERF_CSV_PREVIEW_EARLY', False)) and not bool(getattr(algo, '_csv_preview_printed_early', False)):
                _log_csv_preview_head(algo)
                algo._csv_preview_printed_early = True
        except Exception:
            pass
        # Quick summary of accumulated rows before detailed timeline
        try:
            n_rows = len(list(getattr(algo, '_perf_csv_rows', [])))
        except Exception:
            n_rows = 0
        try:
            algo.Log(f"[CSV] Rows accumulated: {n_rows}")
            if not bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                algo.Log("[CSV] Export disabled; will not write ObjectStore file")
        except Exception:
            pass
        # Perform an early save using a timestamped key BEFORE heavy timeline logs
        if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)) and n_rows > 0:
            try:
                ts_key = _timestamp_key(dt=getattr(algo, 'Time', None))
                export_perf_csv(algo, key=ts_key, also_latest=bool(getattr(algo, 'PERF_CSV_WRITE_LATEST', True)))
            except Exception:
                pass
        events: Dict[Any, Dict[str, Any]] = {}
        # Opens
        for ev in list(getattr(algo, '_ledger_opens', [])):
            t = ev.get('time')
            if t not in events:
                events[t] = {'OPENED': [], 'CLOSED': [], 'EOM': None}
            events[t]['OPENED'].append(ev)
        # Closes
        for ev in list(getattr(algo, '_ledger_closes', [])):
            t = ev.get('time')
            if t not in events:
                events[t] = {'OPENED': [], 'CLOSED': [], 'EOM': None}
            events[t]['CLOSED'].append(ev)
        # EOM snapshots
        for snap in list(getattr(algo, '_ledger_eom_positions', [])):
            t = snap.get('time')
            if t not in events:
                events[t] = {'OPENED': [], 'CLOSED': [], 'EOM': None}
            events[t]['EOM'] = snap
        # Chronological print
        for t in sorted(events.keys()):
            bucket = events[t]
            opened = sorted(bucket.get('OPENED', []), key=lambda r: (r.get('symbol') or ''))
            closed = sorted(bucket.get('CLOSED', []), key=lambda r: (r.get('symbol') or ''))
            snap = bucket.get('EOM')
            ts_open = t.strftime('%Y-%m-%d %H:%M') if hasattr(t, 'strftime') else str(t)
            if opened:
                algo.Log(f"OPENED | {ts_open}")
                for r in opened:
                    algo.Log(f"{r.get('symbol')} | Qty: {r.get('fill_qty'):.0f} | FillPrice: {r.get('fill_price'):.2f}")
                    if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                        try:
                            algo._perf_csv_rows.append({
                                'event_type': 'OPEN',
                                'time': r.get('time'),
                                'symbol': r.get('symbol'),
                                'qty': r.get('fill_qty'),
                                'weight': '',
                                'price': r.get('fill_price'),
                                'mv': '',
                                'action': 'OPEN',
                                'fill_qty': r.get('fill_qty'),
                                'fill_price': r.get('fill_price'),
                                'total_value': getattr(algo.Portfolio, 'TotalPortfolioValue', ''),
                            })
                        except Exception:
                            pass
            if closed:
                algo.Log(f"CLOSED | {ts_open}")
                for r in closed:
                    algo.Log(f"{r.get('symbol')} | Qty: {r.get('fill_qty'):.0f} | FillPrice: {r.get('fill_price'):.2f}")
                    if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                        try:
                            algo._perf_csv_rows.append({
                                'event_type': 'CLOSE',
                                'time': r.get('time'),
                                'symbol': r.get('symbol'),
                                'qty': r.get('fill_qty'),
                                'weight': '',
                                'price': r.get('fill_price'),
                                'mv': '',
                                'action': 'CLOSE',
                                'fill_qty': r.get('fill_qty'),
                                'fill_price': r.get('fill_price'),
                                'total_value': getattr(algo.Portfolio, 'TotalPortfolioValue', ''),
                            })
                        except Exception:
                            pass
            if snap:
                ts_eom = t.strftime('%Y-%m-%d') if hasattr(t, 'strftime') else str(t)
                algo.Log(f"EOM POSITIONS | {ts_eom}")
                for p in list(snap.get('positions', [])):
                    algo.Log(f"{p.get('symbol')} | Qty: {p.get('qty'):.0f} | Weight: {p.get('weight'):.4f} | Close: {p.get('close'):.2f} | MV: {p.get('mv'):.2f}")
                    if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
                        try:
                            algo._perf_csv_rows.append({
                                'event_type': 'EOM_POSITION',
                                'time': snap.get('time'),
                                'symbol': p.get('symbol'),
                                'qty': p.get('qty'),
                                'weight': p.get('weight'),
                                'price': p.get('close'),
                                'mv': p.get('mv'),
                                'action': '',
                                'fill_qty': '',
                                'fill_price': '',
                                'total_value': snap.get('total_value'),
                            })
                        except Exception:
                            pass
        # Final CSV export (timestamped again to capture any last-minute rows)
        if bool(getattr(algo, 'PERF_EXPORT_CSV_ENABLED', False)):
            try:
                ts_key2 = _timestamp_key(dt=getattr(algo, 'Time', None))
                export_perf_csv(algo, key=ts_key2, also_latest=bool(getattr(algo, 'PERF_CSV_WRITE_LATEST', True)), log_label='final')
            except Exception:
                pass
        # Optional: dump full CSV to logs in chunks for manual recovery
        try:
            _dump_full_csv(algo)
        except Exception:
            pass
    except Exception:
        pass


def _log_csv_preview_head(algo: Any) -> None:
    """Log a small CSV head preview to the QC log, before heavy logs.

    Prints the header and the first N rows accumulated so far.
    Controlled by PERF_CSV_PREVIEW_EARLY and PERF_CSV_PREVIEW_HEAD_N.
    """
    try:
        rows = list(getattr(algo, '_perf_csv_rows', []))
        # With no rows we still print the header so users know the schema
        header_only = not rows
        fieldnames = ['event_type','time','symbol','qty','weight','price','mv','action','fill_qty','fill_price','total_value']
        # Build preview text
        sio = StringIO()
        writer = csv.DictWriter(sio, fieldnames=fieldnames)
        writer.writeheader()
        if not header_only:
            limit = max(1, int(getattr(algo, 'PERF_CSV_PREVIEW_HEAD_N', 25) or 25))
            for r in rows[:limit]:
                out = {}
                for k in fieldnames:
                    v = r.get(k, '')
                    if k == 'time' and v and hasattr(v, 'strftime'):
                        v = v.strftime('%Y-%m-%d %H:%M:%S')
                    out[k] = v
                writer.writerow(out)
        # Emit to logs
        text = sio.getvalue()
        lines = text.splitlines()
        algo.Log('[CSV][PreviewEarly] Head preview follows:')
        max_lines = min(len(lines), (1 + int(getattr(algo, 'PERF_CSV_PREVIEW_HEAD_N', 25) or 25)))
        for ln in lines[:max_lines]:
            algo.Log(ln[:400])
    except Exception:
        pass


def export_perf_csv(algo: Any, key=None, also_latest: bool = True, log_label: str = 'early') -> None:
    """Export accumulated performance rows to ObjectStore as a CSV.

    key: Optional ObjectStore key; if None, a timestamped key is generated under 'perf/'.
    also_latest: When True, additionally writes/overwrites 'perf/latest.csv' convenience file.
    log_label: 'early' or 'final' marker for logs.
    """
    try:
        rows = list(getattr(algo, '_perf_csv_rows', []))
        if not rows:
            return
        # Build CSV deterministically
        fieldnames = ['event_type','time','symbol','qty','weight','price','mv','action','fill_qty','fill_price','total_value']
        sio = StringIO()
        writer = csv.DictWriter(sio, fieldnames=fieldnames)
        writer.writeheader()
        for r in rows:
            try:
                out = {}
                for k in fieldnames:
                    v = r.get(k, '')
                    # Normalize time
                    if k == 'time' and v and hasattr(v, 'strftime'):
                        v = v.strftime('%Y-%m-%d %H:%M:%S')
                    out[k] = v
                writer.writerow(out)
            except Exception:
                continue
        # Resolve final keys
        text = sio.getvalue()
        obj_key = key if (key and str(key).strip() != '') else _timestamp_key(dt=getattr(algo, 'Time', None))
        latest_key = 'perf/latest.csv'
        # Try saving as string first (Python interop can prefer str overload)
        saved = False
        try:
            res = algo.ObjectStore.Save(obj_key, text)
            saved = bool(res)  # bool(None) is False, so no explicit None check is needed
            if saved:
                algo.Log(f"[CSV] {obj_key} saved to ObjectStore (string, {log_label})")
        except Exception:
            saved = False
        # If not saved, try bytes variant
        if not saved:
            try:
                data = text.encode('utf-8')
                res2 = algo.ObjectStore.Save(obj_key, data)
                saved = bool(res2)
                if saved:
                    algo.Log(f"[CSV] {obj_key} saved to ObjectStore (bytes, {log_label})")
            except Exception:
                saved = False
        # Optional: verify presence
        if saved:
            try:
                if hasattr(algo.ObjectStore, 'ContainsKey'):
                    present = bool(algo.ObjectStore.ContainsKey(obj_key))
                    if not present:
                        algo.Log(f"[CSV][Warn] Save() reported success but key not found via ContainsKey: {obj_key}")
            except Exception:
                pass
            # Write/overwrite 'latest' for convenience
            if also_latest:
                try:
                    # Try string first
                    resL = algo.ObjectStore.Save(latest_key, text)
                    if resL:
                        algo.Log(f"[CSV] {latest_key} updated (string, {log_label})")
                    else:
                        # try bytes
                        algo.ObjectStore.Save(latest_key, text.encode('utf-8'))
                        algo.Log(f"[CSV] {latest_key} updated (bytes, {log_label})")
                    # Flat key variants (avoid slash for UI quirks)
                    flat_ts = obj_key.replace('/', '__')
                    try:
                        algo.ObjectStore.Save(flat_ts, text)
                        algo.Log(f"[CSV] {flat_ts} saved (flat alias, {log_label})")
                    except Exception:
                        pass
                    # Compressed gzip copy for faster download (base filename .gz)
                    try:
                        gz_bytes = gzip.compress(text.encode('utf-8'))
                        gz_key = obj_key + '.gz'
                        resG = algo.ObjectStore.Save(gz_key, gz_bytes)
                        if resG:
                            algo.Log(f"[CSV] {gz_key} saved (gzip, {log_label}) size={len(gz_bytes)}")
                        # Flat gzip alias
                        flat_gz = flat_ts + '.gz'
                        try:
                            algo.ObjectStore.Save(flat_gz, gz_bytes)
                        except Exception:
                            pass
                    except Exception:
                        pass
                    # Tiny base64 head (first 2KB) to aid manual recovery if download fails
                    try:
                        head_b64 = base64.b64encode(text[:2048].encode('utf-8')).decode('utf-8')
                        algo.Log(f"[CSV][HeadB64] {head_b64}")
                    except Exception:
                        pass
                except Exception:
                    pass
        # Fallback when not saved by either method
        if not saved:
            head = text.splitlines()[:50]
            algo.Log(f"[CSV][Fallback] Save() returned False or failed ({log_label}); head preview:")
            for ln in head:
                algo.Log(ln[:400])
    except Exception:
        pass
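
# Read-back sketch (an assumption about the downstream workflow; nothing in
# this module calls it): recover an exported CSV from ObjectStore, e.g. in
# OnEndOfAlgorithm or a research notebook attached to the same project.
def read_perf_csv_back(algo: Any, key: str = 'perf/latest.csv') -> str:
    """Best-effort read of an exported perf CSV from ObjectStore; returns '' if absent."""
    try:
        if hasattr(algo.ObjectStore, 'ContainsKey') and not algo.ObjectStore.ContainsKey(key):
            return ''
        raw = algo.ObjectStore.Read(key)  # Lean's ObjectStore.Read returns the stored string
        return str(raw) if raw is not None else ''
    except Exception:
        return ''
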


def _dump_full_csv(algo: Any) -> None:
    """Dump the entire accumulated CSV into the log in chunked form.

    Controlled by PERF_CSV_DUMP_FULL (truthy); to enable, set PERF_CSV_DUMP_FULL=1.
    PERF_CSV_DUMP_CHUNK_LINES sets lines per chunk (including the header in the first
    chunk); unset or <= 0 falls back to 200, and the value is clamped to [10, 1000].
    Safeguard: if the estimated CSV size exceeds ~5MB, the dump aborts to avoid log truncation.
    """
    try:
        if not bool(getattr(algo, 'PERF_CSV_DUMP_FULL', False)):
            return
        rows = list(getattr(algo, '_perf_csv_rows', []))
        if not rows:
            algo.Log('[CSV][DumpFull] No rows to dump.')
            return
        # Resolve chunk size
        raw_chunk = int(getattr(algo, 'PERF_CSV_DUMP_CHUNK_LINES', 0) or 0)
        if raw_chunk <= 0:
            # Default if user enabled full dump without specifying chunk size
            raw_chunk = 200
        chunk_lines = max(10, min(1000, int(raw_chunk)))
        fieldnames = ['event_type','time','symbol','qty','weight','price','mv','action','fill_qty','fill_price','total_value']
        sio = StringIO()
        writer = csv.DictWriter(sio, fieldnames=fieldnames)
        writer.writeheader()
        for r in rows:
            try:
                out = {}
                for k in fieldnames:
                    v = r.get(k, '')
                    if k == 'time' and v and hasattr(v, 'strftime'):
                        v = v.strftime('%Y-%m-%d %H:%M:%S')
                    out[k] = v
                writer.writerow(out)
            except Exception:
                continue
        text = sio.getvalue()
        all_lines = text.splitlines()
        total_lines = len(all_lines)
        est_bytes = len(text.encode('utf-8'))
        # Safety: abort very large dumps (heuristic)
        if est_bytes > 5_000_000:  # ~5MB
            algo.Log(f"[CSV][DumpFull][Abort] Estimated size {est_bytes}B exceeds 5MB safety cap; not dumping.")
            return
        # Chunking
        chunks = []
        cur = []
        for i, ln in enumerate(all_lines):
            cur.append(ln)
            if len(cur) >= chunk_lines:
                chunks.append(cur)
                cur = []
        if cur:
            chunks.append(cur)
        algo.Log(f"[CSV][DumpBegin] lines={total_lines} bytes={est_bytes} chunk_lines={chunk_lines} chunks={len(chunks)}")
        for idx, chunk in enumerate(chunks, start=1):
            algo.Log(f"[CSV][DumpChunk {idx}/{len(chunks)}]")
            for ln in chunk:
                # Guard for extremely long lines
                algo.Log(ln[:800])
        algo.Log('[CSV][DumpEnd]')
        # Compressed base64 block for single-step reconstruction (truncated only by log size limits)
        try:
            gz = gzip.compress(text.encode('utf-8'))
            b64 = base64.b64encode(gz).decode('utf-8')
            # If extremely large, just log the first 1MB base64 segment
            max_b64 = 1_400_000  # ~1MB of decoded gzip; adjust if QC log limits change
            if len(b64) > max_b64:
                algo.Log(f"[CSV][DumpB64][Truncated] original_b64_len={len(b64)} logged_len={max_b64}")
                b64_out = b64[:max_b64]
            else:
                algo.Log(f"[CSV][DumpB64] b64_len={len(b64)}")
                b64_out = b64
            # Emit with clear delimiters so user can regex-extract easily
            algo.Log('[CSV][DumpB64][BEGIN]')
            # Split into smaller log lines to reduce risk of truncation per line
            step = 800
            for i in range(0, len(b64_out), step):
                algo.Log(b64_out[i:i+step])
            algo.Log('[CSV][DumpB64][END]')
        except Exception:
            pass
    except Exception:
        pass
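
# Offline reconstruction sketch for the [CSV][DumpB64] block above (an
# assumption about the downstream workflow; not called by the algorithm).
# Run it locally against a downloaded backtest log. Assumes the payload lines
# between the BEGIN/END markers are bare base64; if your log format prefixes
# each line (e.g. with timestamps), strip the prefixes first. Truncated dumps
# (the [Truncated] path above) will fail gzip decompression.
def rebuild_csv_from_log(log_path: str, out_path: str = 'perf_rebuilt.csv') -> None:
    import base64 as _b64
    import gzip as _gz
    with open(log_path, 'r', encoding='utf-8') as f:
        lines = f.read().splitlines()
    begin_idx = next((i for i, ln in enumerate(lines) if ln.endswith('[CSV][DumpB64][BEGIN]')), None)
    end_idx = next((i for i, ln in enumerate(lines) if ln.endswith('[CSV][DumpB64][END]')), None)
    if begin_idx is None or end_idx is None:
        raise ValueError('DumpB64 markers not found in log file')
    b64 = ''.join(ln.strip() for ln in lines[begin_idx + 1:end_idx])
    text = _gz.decompress(_b64.b64decode(b64)).decode('utf-8')
    with open(out_path, 'w', encoding='utf-8') as f:
        f.write(text)
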

# Ranking helpers: batched History, x-cache, linregress reuse
from typing import Any, Dict, List, Optional, Iterable, Tuple
from datetime import timedelta

# Import Lean & scientific stack lazily/optionally
try:
    from AlgorithmImports import *  # type: ignore
except Exception:
    class Resolution:  # type: ignore
        Daily = 0

try:
    import numpy as np  # type: ignore
except Exception:
    np = None  # type: ignore

try:
    from scipy.stats import linregress  # type: ignore
except Exception:
    # Fallback: minimal slope/r via ordinary least squares if scipy is unavailable locally
    from collections import namedtuple
    _LinregressResult = namedtuple('LinregressResult', ['slope', 'rvalue'])

    def linregress(x, y):
        n = len(x)
        if n < 2:
            return _LinregressResult(float('nan'), float('nan'))
        mx = sum(x) / n
        my = sum(y) / n
        num = sum((xi - mx) * (yi - my) for xi, yi in zip(x, y))
        den = sum((xi - mx) ** 2 for xi in x)
        if den == 0:
            # All x identical: slope/r are undefined; NaN keeps downstream NaN checks working
            return _LinregressResult(float('nan'), float('nan'))
        slope = num / den
        den_y = sum((yi - my) ** 2 for yi in y)
        r = num / ((den * den_y) ** 0.5) if den_y else float('nan')
        return _LinregressResult(slope, r)
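

# Quick sanity check for linregress (works with either the scipy version or
# the fallback above; a perfectly linear series must give slope 2 and r = 1).
# Wrapped in a function so importing this module never executes it; call it
# manually if in doubt.
def _linregress_selftest() -> None:
    res = linregress([0, 1, 2, 3], [1.0, 3.0, 5.0, 7.0])
    assert abs(res.slope - 2.0) < 1e-9
    assert abs(res.rvalue - 1.0) < 1e-9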


def _series_close(hist, sym):
    try:
        s = hist.loc[sym]
        close = s['close'] if 'close' in s.columns else s.close
        return close.sort_index()
    except Exception:
        return None

def _drop_today(series, algo):
    try:
        if series is None or getattr(algo, 'Time', None) is None:
            return series
        today = algo.Time.date()
        # Keep only bars strictly before today's date
        return series[series.index.date < today]
    except Exception:
        return series


def get_rank(algo: Any, symbol, prices_frame=None, x_cache: Optional[Dict[int, List[float]]] = None, use_r2: bool = False) -> float:
    # Minimal placeholder: not used directly in snapshot below
    return 0.0


def _print_ranking_boundary(algo: Any, period: int, regime: str, topn: int, result_top: List[Tuple[float, Any]], ranks: Dict[Any, Dict[str, Any]]) -> None:
    try:
        k = int(getattr(algo, 'RANKING_BOUNDARY_K', 3))
        use_r2 = bool(getattr(algo, 'R2', True))
        cash_sid = getattr(algo, 'CASH_FILTERING_SID', None)
        total = len(result_top)
        if total == 0 or topn <= 0:
            return
        start = max(0, min(total-1, topn-1) - k)
        end = min(total, (topn-1) + k + 1)
        algo.Debug(f"[diag] Period {period} boundary (regime={regime}, use_r2={use_r2}) — showing ranks {start+1}..{end}")
        for i in range(start, end):
            score, sec = result_top[i]
            rd = ranks.get(sec, {})
            lm_rk, lm_val = rd.get('x_long_momentum', (None, None))
            sm_rk, sm_val = rd.get('x_short_momentum', (None, None))
            vv_rk, vv_val = rd.get('x_vol', (None, None))
            r = rd.get('r', float('nan'))
            r2 = (r * r) if (r is not None and r == r) else float('nan')  # None check must precede the NaN (r == r) test
            slope = rd.get('slope', float('nan'))
            tag = []
            if i == topn-1:
                tag.append('CUT')
            if sec == cash_sid:
                tag.append('CASH_SID')
            tag_s = (" ["+ ",".join(tag) + "]") if tag else ""
            algo.Debug(
                f"  #{i+1:>3} {getattr(sec,'Value',str(sec)):<8} rnk={score:>8.2f} | lm({lm_rk}={lm_val:.4f}) sm({sm_rk}={sm_val:.4f}) vol({vv_rk}={vv_val:.4f}) r={r:.4f} r2={r2:.4f} slope={slope:.6f}{tag_s}"
            )
    except Exception:
        pass


def update_ranking_snapshot(algo: Any) -> None:
    """Replicate the monolith's ranking (long/short momentum + volatility via r or R^2).

    For each sub-portfolio period:
      - Compute long_momentum over LONG_PERIOD and short_momentum over SHORT_PERIOD
      - Compute volatility metric from linear regression on VOL_PERIOD closes
        (use log prices if algo.log_vol=True and prices > 0)
      - Convert to ranks per metric and aggregate using multipliers to final rnk
      - Sort to pick TOP N and BOTTOM N (excluding CASH_FILTERING_SID from TOP)
    A worked toy example of the rank aggregation follows this function.
    """
    try:
        regime = getattr(algo, 'regime', 'Risk-on')
        # Determine rank_name and base candidate list per regime
        rank_name = 'industry' if regime == 'Risk-on' else 'sector'
        base_list: List[Any] = list(getattr(algo, 'INDUSTRY', [])) if rank_name == 'industry' else list(getattr(algo, 'SECTOR', []))
        if not base_list:
            # Fallback: use INDUSTRY if SECTOR not defined
            base_list = list(getattr(algo, 'INDUSTRY', []))
        if not base_list:
            return
        cash_filter_sid = getattr(algo, 'CASH_FILTERING_SID', None)
        percent_mom = bool(getattr(algo, 'PERCENTAGE_MOMENTUM', True))
        use_r2 = bool(getattr(algo, 'R2', True))
        log_vol = bool(getattr(algo, 'log_vol', False))
        long_mult = float(getattr(algo, 'LONG_MOMENTUM_MULTIPLIER', 0.0))
        short_mult = float(getattr(algo, 'SHORT_MOMENTUM_MULTIPLIER', 0.0))
        vol_mult = float(getattr(algo, 'VOL_MULTIPLIER', 1.0))
        cash_filter_types = list(getattr(algo, 'CASH_FILTER_TYPES', []))

        periods: List[int] = list(getattr(algo, 'LONG_PERIODS', [])) or [int(getattr(algo, 'LONG_PERIOD', 136))]
        for period in periods:
            if rank_name == 'industry':
                # Industry (Risk-on): use full period for long, half for short/vol
                long_period = int(period)
                short_period = max(2, int(period // 2))
                vol_period = max(2, int(period // 2))
                candidates = list(base_list)
            else:
                # Sector (Risk-off): compute using half windows relative to Industry.
                # Keep display/keys as the full 'period' value; only the effective window lengths are reduced.
                long_period = max(2, int(period // 2))
                short_period = max(2, int(long_period // 2))  # effectively period//4
                vol_period = max(2, int(long_period // 2))    # effectively period//4
                candidates = list(base_list)
            # Determine if CASH_FILTERING_SID should be included as a ranking candidate
            include_cash_sid = None
            if cash_filter_sid is not None and (rank_name in cash_filter_types):
                include_cash_sid = cash_filter_sid
            # Optional: allow DIAG_SERIES_DUMP to target a symbol not in candidates
            dump_sym = None
            try:
                sym_filter_str = getattr(algo, 'DIAG_SERIES_DUMP_SYMBOL', None)
                if bool(getattr(algo, 'DIAG_SERIES_DUMP', False)) and sym_filter_str:
                    try:
                        dump_sym = algo.Symbol(str(sym_filter_str))
                    except Exception:
                        dump_sym = None
            except Exception:
                dump_sym = None
            # Pull a generous batch of bars for all candidates: enough to cover
            # long momentum (N+1 closes) and the regression windows, with slack
            bars = max(long_period, short_period, vol_period) * 2
            hist_syms = list(candidates) + ([include_cash_sid] if include_cash_sid else [])
            if dump_sym is not None and dump_sym not in hist_syms:
                hist_syms.append(dump_sym)
            hist = algo.History(hist_syms, bars, Resolution.Daily)
            if hist is None or hist.empty:
                continue
            ranking: List[tuple] = []
            for sec in sorted(set(hist_syms), key=lambda s: s.Value if hasattr(s, 'Value') else str(s)):
                if sec is None:
                    continue
                is_candidate = (sec in candidates) or (include_cash_sid is not None and sec == include_cash_sid)
                # Long momentum: use (long_period + 1) closes to span exactly long_period intervals
                try:
                    s_long = _series_close(hist, sec)
                    s_long = _drop_today(s_long, algo)
                    if s_long is None or len(s_long) < (long_period + 1):
                        continue
                    lhist = s_long[-(long_period + 1):]
                    raw_long = float(lhist.iloc[-1]) - float(lhist.iloc[0])
                    long_momentum = (raw_long / float(lhist.iloc[0])) if percent_mom and float(lhist.iloc[0]) != 0 else raw_long
                except Exception:
                    continue
                # Short momentum: use (short_period + 1) closes to span exactly short_period intervals
                try:
                    s_short = _series_close(hist, sec)
                    s_short = _drop_today(s_short, algo)
                    if s_short is None or len(s_short) < (short_period + 1):
                        short_momentum = 0.0
                    else:
                        shist = s_short[-(short_period + 1):]
                        raw_short = float(shist.iloc[-1]) - float(shist.iloc[0])
                        short_momentum = (raw_short / float(shist.iloc[0])) if percent_mom and float(shist.iloc[0]) != 0 else raw_short
                except Exception:
                    short_momentum = 0.0
                # Volatility via regression (r or R^2)
                try:
                    s_vol = _series_close(hist, sec)
                    s_vol = _drop_today(s_vol, algo)
                    vhist = s_vol[-vol_period:]  # exactly vol_period closes (fewer if history is short)
                    x = list(range(len(vhist)))
                    y_series = np.log(vhist.values) if (np is not None and log_vol and np.all(vhist.values > 0)) else vhist.values
                    res = linregress(x, y_series)
                    r = getattr(res, 'rvalue', float('nan'))
                    volatility = (r*r) if use_r2 and not (np is None or np.isnan(r)) else r
                    slope = getattr(res, 'slope', float('nan'))
                except Exception:
                    volatility = float('nan')
                    slope = float('nan')
                    r = float('nan')
                # If this is a dump-only symbol (not a ranking candidate), print series dump and skip ranking append
                if not is_candidate:
                    try:
                        if bool(getattr(algo, 'DIAG_SERIES_DUMP', False)) and dump_sym is not None and sec == dump_sym:
                            # Counts: momentum uses N+1 closes (intervals=L_N), volatility uses exactly V_N closes
                            L_N = (len(lhist) - 1) if hasattr(lhist, '__len__') else 0
                            S_N = (len(shist) - 1) if 'shist' in locals() and hasattr(shist, '__len__') and len(shist) > 0 else 0
                            V_N = len(vhist) if hasattr(vhist, '__len__') else 0
                            lm_dt0 = lhist.index[0] if hasattr(lhist, 'index') and len(lhist) > 0 else None
                            lm_dt1 = lhist.index[-1] if hasattr(lhist, 'index') and len(lhist) > 0 else None
                            lm_v0 = float(lhist.iloc[0]) if len(lhist) > 0 else None
                            lm_v1 = float(lhist.iloc[-1]) if len(lhist) > 0 else None
                            sm_dt0 = shist.index[0] if 'shist' in locals() and hasattr(shist, 'index') and len(shist) > 0 else None
                            sm_dt1 = shist.index[-1] if 'shist' in locals() and hasattr(shist, 'index') and len(shist) > 0 else None
                            sm_v0 = float(shist.iloc[0]) if 'shist' in locals() and len(shist) > 0 else None
                            sm_v1 = float(shist.iloc[-1]) if 'shist' in locals() and len(shist) > 0 else None
                            vv_dt0 = vhist.index[0] if hasattr(vhist, 'index') and len(vhist) > 0 else None
                            vv_dt1 = vhist.index[-1] if hasattr(vhist, 'index') and len(vhist) > 0 else None
                            vv_v0 = float(vhist.iloc[0]) if len(vhist) > 0 else None
                            vv_v1 = float(vhist.iloc[-1]) if len(vhist) > 0 else None
                            algo.Debug(
                                f"[series_dump] P={period} rank={rank_name} sym={getattr(sec,'Value',str(sec))} L_N={L_N} S_N={S_N} V_N={V_N} "
                                f"long:({lm_dt0:%Y-%m-%d},{lm_v0})→({lm_dt1:%Y-%m-%d},{lm_v1}) "
                                f"short:({sm_dt0:%Y-%m-%d},{sm_v0})→({sm_dt1:%Y-%m-%d},{sm_v1}) "
                                f"vol:({vv_dt0:%Y-%m-%d},{vv_v0})→({vv_dt1:%Y-%m-%d},{vv_v1}) [dump-only]"
                            )
                    except Exception:
                        pass
                    continue

                ranking.append((sec, long_momentum, short_momentum, volatility, slope, r, lhist))

                # Optional series dump for exact windows used (first/last dates and values)
                try:
                    if bool(getattr(algo, 'DIAG_SERIES_DUMP', False)):
                        sym_filter = getattr(algo, 'DIAG_SERIES_DUMP_SYMBOL', None)
                        want = (sym_filter is None) or (getattr(sec, 'Value', str(sec)) == sym_filter)
                        if want:
                            # Counts: momentum uses N+1 closes (intervals=L_N), volatility uses exactly V_N closes
                            L_N = (len(lhist) - 1) if hasattr(lhist, '__len__') else 0
                            S_N = (len(shist) - 1) if 'shist' in locals() and hasattr(shist, '__len__') and len(shist) > 0 else 0
                            V_N = len(vhist) if hasattr(vhist, '__len__') else 0
                            lm_dt0 = lhist.index[0] if hasattr(lhist, 'index') and len(lhist) > 0 else None
                            lm_dt1 = lhist.index[-1] if hasattr(lhist, 'index') and len(lhist) > 0 else None
                            lm_v0 = float(lhist.iloc[0]) if len(lhist) > 0 else None
                            lm_v1 = float(lhist.iloc[-1]) if len(lhist) > 0 else None
                            sm_dt0 = shist.index[0] if 'shist' in locals() and hasattr(shist, 'index') and len(shist) > 0 else None
                            sm_dt1 = shist.index[-1] if 'shist' in locals() and hasattr(shist, 'index') and len(shist) > 0 else None
                            sm_v0 = float(shist.iloc[0]) if 'shist' in locals() and len(shist) > 0 else None
                            sm_v1 = float(shist.iloc[-1]) if 'shist' in locals() and len(shist) > 0 else None
                            vv_dt0 = vhist.index[0] if hasattr(vhist, 'index') and len(vhist) > 0 else None
                            vv_dt1 = vhist.index[-1] if hasattr(vhist, 'index') and len(vhist) > 0 else None
                            vv_v0 = float(vhist.iloc[0]) if len(vhist) > 0 else None
                            vv_v1 = float(vhist.iloc[-1]) if len(vhist) > 0 else None
                            algo.Debug(
                                f"[series_dump] P={period} rank={rank_name} sym={getattr(sec,'Value',str(sec))} L_N={L_N} S_N={S_N} V_N={V_N} "
                                f"long:({lm_dt0:%Y-%m-%d},{lm_v0})→({lm_dt1:%Y-%m-%d},{lm_v1}) "
                                f"short:({sm_dt0:%Y-%m-%d},{sm_v0})→({sm_dt1:%Y-%m-%d},{sm_v1}) "
                                f"vol:({vv_dt0:%Y-%m-%d},{vv_v0})→({vv_dt1:%Y-%m-%d},{vv_v1})"
                            )
                except Exception:
                    pass

            # Build ranks per metric
            ranks: Dict[Any, Dict[str, Any]] = {}
            for i, name in enumerate(('x_long_momentum', 'x_short_momentum', 'x_vol')):
                if name in ('x_long_momentum', 'x_short_momentum'):
                    reverse = True
                else:
                    # x_vol: higher-is-better only when using R2 (or similar vol metrics)
                    reverse = use_r2
                for num, item in enumerate(sorted(ranking, key=lambda x: x[i+1], reverse=reverse)):
                    if item[0] not in ranks:
                        ranks[item[0]] = {}
                    ranks[item[0]][name] = (num+1, item[i+1])
                    ranks[item[0]]['slope'] = item[4]
                    ranks[item[0]]['r'] = item[5]
                    try:
                        rv = float(item[5])
                        ranks[item[0]]['r2'] = rv * rv
                    except Exception:
                        ranks[item[0]]['r2'] = float('nan')
                    if name == 'x_long_momentum':
                        ranks[item[0]]['lhist'] = item[6]

            # Aggregate to final rnk
            result_top: List[tuple] = []
            result_bottom: List[tuple] = []
            for sec in ranks:
                lm_rank = ranks[sec]['x_long_momentum'][0]
                sm_rank = ranks[sec]['x_short_momentum'][0]
                vol_rank = ranks[sec]['x_vol'][0]
                rnk = lm_rank * long_mult + sm_rank * short_mult + vol_rank * vol_mult
                ranks[sec]['final_score'] = rnk
                if regime == 'Risk-on':
                    if int(getattr(algo, 'INDUSTRY_BOTN', max(1, int(getattr(algo, 'INDUSTRY_TOPN', 10)) // 2))) > 0:
                        result_bottom.append((rnk, sec))
                else:
                    # In Risk-off we won’t short by default; bottom list remains empty
                    pass
                result_top.append((rnk, sec))

            # Sort and clip
            if regime == 'Risk-on':
                topn = int(getattr(algo, 'INDUSTRY_TOPN', 10))
                botn = int(getattr(algo, 'INDUSTRY_BOTN', max(1, topn // 2)))
            else:
                topn = int(getattr(algo, 'SECTOR_TOPN', 10))
                botn = 0
            result_top = sorted(result_top)
            result_bottom = sorted(result_bottom, reverse=True)
            if len(result_top) > topn:
                result_top = result_top[:topn]
            if botn <= 0:
                result_bottom = []
            elif len(result_bottom) > botn:
                result_bottom = result_bottom[:botn]

            # Exclude CASH_FILTERING_SID via truncation per tri-state policy (Nuanced Cash boundary)
            cash_removed_idx = None
            candidates_count = len(ranking)
            cash_in_candidates = any(t[0] == cash_filter_sid for t in ranking) if cash_filter_sid is not None else False
            topn_before = len(result_top)
            cash_index_before = None
            if cash_filter_sid is not None:
                for j, (rk, sc) in enumerate(result_top):
                    if sc == cash_filter_sid:
                        cash_index_before = j
                        break
            # If cash filter is in TopN and policy includes cash, truncate non-cash list at boundary; remaining allocation goes to cash complex
            if cash_filter_sid is not None and include_cash_sid is not None:
                for i, (rnk, sec) in enumerate(result_top):
                    if sec == cash_filter_sid:
                        cash_removed_idx = i
                        result_top = result_top[:i]
                        break

            top_list = [sec for (rnk, sec) in result_top]
            bottom_list = [sec for (rnk, sec) in result_bottom]

            if hasattr(algo, 'sub_portfolios') and period in algo.sub_portfolios:
                algo.sub_portfolios[period]['topranked'] = top_list
                algo.sub_portfolios[period]['bottomranked'] = bottom_list
            # Store trace for diagnostics
            try:
                if not hasattr(algo, 'last_ranking') or not isinstance(getattr(algo, 'last_ranking'), dict):
                    algo.last_ranking = {}
                algo.last_ranking[period] = {
                    'regime': regime,
                    'use_r2': use_r2,
                    'topn': int(getattr(algo, 'INDUSTRY_TOPN', 10)) if regime == 'Risk-on' else int(getattr(algo, 'SECTOR_TOPN', 10)),
                    'scores': list(result_top),
                    'ranks': ranks,
                    'cash_removed_index': cash_removed_idx,
                    'boundary_meta': {
                        'candidates': candidates_count,
                        'cash_in_candidates': cash_in_candidates,
                        # Record active control surface solely via CASH_FILTER_TYPES
                        'policy': f"CASH_FILTER_TYPES={cash_filter_types}",
                        'topN_before': topn_before,
                        'topN_after': len(result_top),
                        'cash_index_before': cash_index_before if cash_index_before is not None else -1,
                    }
                }
            except Exception:
                pass
            # Optional boundary print
            if bool(getattr(algo, 'DIAG_RANKING_BOUNDARY', False)):
                _print_ranking_boundary(algo, period, regime, topn, result_top, ranks)
            # Optional one-line boundary metadata similar to COMP 2
            if bool(getattr(algo, 'DIAG_BOUNDARY_META', False)):
                try:
                    algo.Debug(
                        f"BoundaryMeta: rank={'industry' if regime=='Risk-on' else 'sector'}, regime={regime}, candidates={candidates_count}, "
                        f"cash_in_candidates={cash_in_candidates}, policy=CASH_FILTER_TYPES={cash_filter_types}, truncated={cash_removed_idx is not None}, "
                        f"topN_before={topn_before}, topN_after={len(result_top)}, cash_index={-1 if cash_index_before is None else cash_index_before}"
                    )
                except Exception:
                    pass
            # Single-line cash boundary identification
            try:
                if bool(getattr(algo, 'PRINT_CASH_BOUNDARY', False)) and cash_removed_idx is not None:
                    cash_label = getattr(cash_filter_sid, 'Value', str(cash_filter_sid)) if cash_filter_sid is not None else 'None'
                    algo.Log(
                        f"[cash_boundary] P={period} rank={'industry' if regime=='Risk-on' else 'sector'} TopN={topn} boundary_index={cash_removed_idx} "
                        f"non_cash_kept={len(top_list)} cash_sid={cash_label}"
                    )
            except Exception:
                pass
        # Flat cache placeholder to match interfaces used elsewhere
        algo.last_ranking = getattr(algo, 'last_ranking', {})
    except Exception:
        return
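

# Worked example of the rank-aggregation step in update_ranking_snapshot
# (illustrative only: plain strings stand in for Symbols and the multipliers
# are hypothetical). Each metric contributes its 1-based rank, weighted by its
# multiplier (rnk = lm_rank*long_mult + sm_rank*short_mult + vol_rank*vol_mult);
# a lower aggregate score ranks higher.
def _example_rank_aggregation() -> None:
    long_mult, short_mult, vol_mult = 1.0, 0.5, 1.0
    # metric ranks per symbol: (lm_rank, sm_rank, vol_rank)
    metric_ranks = {'AAA': (1, 2, 1), 'BBB': (2, 1, 3), 'CCC': (3, 3, 2)}
    scores = {s: lm * long_mult + sm * short_mult + vv * vol_mult
              for s, (lm, sm, vv) in metric_ranks.items()}
    # AAA -> 3.0, BBB -> 5.5, CCC -> 6.5
    top = sorted(scores.items(), key=lambda kv: kv[1])
    assert [s for s, _ in top] == ['AAA', 'BBB', 'CCC']
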
"""
Rebalance helpers: compute per-sub weights, aggregate/normalize, and submit consolidated MOC orders
This mirrors the in-file logic so behavior remains identical when called from a thin wrapper.
"""

from typing import Any, Dict, List, Tuple, Iterable

try:
    from AlgorithmImports import *  # type: ignore
except Exception:
    pass

from .orders import build_moc_basket, submit_moc_basket


def _compute_subportfolio_weights(
    algo: Any,
    topranked: Iterable,
    bottomranked: Iterable,
    topn: int,
    botn: int,
    cash_filter_sid,
    cash_syms: List[Any],
    nuanced_cash_pct: float,
    hedge_multiplier_industry: float,
) -> Dict[Any, float]:
    """Port of in-file _compute_subportfolio_weights (weights sum ~ 1 - hedge multiplier).
    - Longs: equal across up to topn; leftover goes to cash proxies (with nuanced cash to filter SID)
    - Shorts: distribute -hedge_multiplier_industry across bottomranked
    """
    weights: Dict[Any, float] = {}
    # Longs
    if topn and topn > 0:
        long_count = len(topranked)
        per_long = 1.0 / float(topn)
        for sec in topranked:
            weights[sec] = weights.get(sec, 0.0) + per_long
        # Cash allocation from any unused long slots
        cash_weight = max(0.0, 1.0 - (long_count / float(topn)))
        if cash_weight > 0:
            # Nuanced cash goes to the single filter SID; note that when no
            # filter SID is set, the nuanced slice stays unallocated (kept
            # as-is to mirror the in-file logic this function ports)
            ncw = cash_weight * float(nuanced_cash_pct)
            if cash_filter_sid is not None:
                weights[cash_filter_sid] = weights.get(cash_filter_sid, 0.0) + ncw
            remaining = cash_weight - ncw
            if remaining > 0 and cash_syms:
                per_cash = remaining / float(len(cash_syms))
                for cs in cash_syms:
                    if cs == cash_filter_sid:
                        continue
                    weights[cs] = weights.get(cs, 0.0) + per_cash
    # Shorts (hedge)
    if hedge_multiplier_industry and hedge_multiplier_industry > 0 and botn and botn > 0 and bottomranked:
        per_short = -float(hedge_multiplier_industry) / float(botn)
        for sec in bottomranked:
            if sec == cash_filter_sid or sec in (cash_syms or []):
                continue
            weights[sec] = weights.get(sec, 0.0) + per_short
    return weights
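
# Usage sketch with hypothetical tickers (plain strings stand in for Symbol
# objects; the function never touches `algo`, so None suffices here). With
# topn=5 and only 3 longs ranked, 2/5 of the long book is cash: 25% of that
# cash slice goes to the filter SID and the rest splits across the proxies,
# while the 0.2 hedge mass is spread equally over the shorts.
def _example_subportfolio_weights() -> Dict[Any, float]:
    w = _compute_subportfolio_weights(
        algo=None,
        topranked=['XLK', 'XLF', 'XLV'],
        bottomranked=['XLE', 'XLU'],
        topn=5,
        botn=2,
        cash_filter_sid='SHV',
        cash_syms=['BIL', 'SHY'],
        nuanced_cash_pct=0.25,
        hedge_multiplier_industry=0.2,
    )
    # Longs 0.2 each; SHV 0.1; BIL/SHY 0.15 each; shorts -0.1 each.
    assert abs(sum(w.values()) - (1.0 - 0.2)) < 1e-9  # net = 1 - hedge multiplier
    return w
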


def _build_per_sub_weights(algo: Any) -> Dict[int, Dict[Any, float]]:
    """Construct and store per-sub-portfolio weights on algo.sub_portfolio_weights.
    Returns the dict for convenience.
    """
    sub_weights: Dict[int, Dict[Any, float]] = {}
    regime = getattr(algo, 'regime', 'Risk-on')
    nuanced_cash_pct = float(getattr(algo, 'NUANCED_CASH', 0.0)) / 100.0
    cash_filter_sid = getattr(algo, 'CASH_FILTERING_SID', None)
    cash_all = list(getattr(algo, 'CASH', []))
    cash_syms = [s for s in cash_all if s != cash_filter_sid]
    for period, sub in getattr(algo, 'sub_portfolios', {}).items():
        topranked = list(sub.get('topranked', []))
        bottomranked = list(sub.get('bottomranked', []))
        if regime == 'Risk-on':
            topn = int(getattr(algo, 'INDUSTRY_TOPN', 10))
            botn = int(getattr(algo, 'INDUSTRY_BOTN', max(1, topn // 2)))
            hedge_mult = float(getattr(algo, 'HEDGE_MULTIPLIER_INDUSTRY_RISK_ON', 0.0))
        else:
            topn = int(getattr(algo, 'SECTOR_TOPN', 10))
            botn = 0
            hedge_mult = float(getattr(algo, 'HEDGE_MULTIPLIER_INDUSTRY_RISK_OFF', 0.0))
        w = _compute_subportfolio_weights(
            algo,
            topranked,
            bottomranked,
            topn,
            botn,
            cash_filter_sid,
            cash_syms,
            nuanced_cash_pct,
            hedge_mult,
        )
        sub_weights[period] = w
    # Persist back to algo
    setattr(algo, 'sub_portfolio_weights', dict(sub_weights))
    return sub_weights


def _aggregate_and_normalize_targets(algo: Any, sub_port_weights: Dict[int, Dict[Any, float]]) -> Dict[Any, float]:
    # Aggregate weights across subs
    aggregated: Dict[Any, float] = {}
    # Also compute sub-frequency: in how many subs did a symbol receive positive long weight
    freq: Dict[Any, int] = {}
    for wmap in sub_port_weights.values():
        for sym, w in wmap.items():
            aggregated[sym] = aggregated.get(sym, 0.0) + float(w)
            if w > 0:
                freq[sym] = freq.get(sym, 0) + 1
    # Normalize long side to target gross leverage; keep shorts unchanged
    long_leverage = float(getattr(algo, 'long_leverage', 1.30))
    # Match COMP 1: apply cash buffer before leverage
    cash_pct = float(getattr(algo, 'CASH_PERCENT', 0.0)) / 100.0
    target_long = long_leverage * max(0.0, 1.0 - cash_pct)
    # Concentration: default behavior limits non-cash positives to TopN, filling leftover with cash proxies
    try:
        regime = getattr(algo, 'regime', 'Risk-on')
        topn_cap = int(getattr(algo, 'INDUSTRY_TOPN', 10)) if regime == 'Risk-on' else int(getattr(algo, 'SECTOR_TOPN', 10))
        botn_cap = int(getattr(algo, 'INDUSTRY_BOTN', max(1, topn_cap // 2))) if regime == 'Risk-on' else 0
    except Exception:
        topn_cap = 10
        botn_cap = max(1, topn_cap // 2)
    # Identify cash-like symbols
    cash_filter_sid = getattr(algo, 'CASH_FILTERING_SID', None)
    cash_set = set(getattr(algo, 'CASH', []) or [])
    if cash_filter_sid is not None:
        cash_set.add(cash_filter_sid)
    # Split positives into non-cash and cash
    pos_items = [(sym, w) for sym, w in aggregated.items() if w > 0]
    non_cash = [(s, w) for s, w in pos_items if s not in cash_set]
    cash_pos = [(s, w) for s, w in pos_items if s in cash_set]

    # Optional frequency guard: drop non-cash with lower frequency than cash complex
    if bool(getattr(algo, 'APPLY_CASH_FREQUENCY_GUARD', False)):
        # Determine cash frequency threshold: max frequency among cash filter + cash proxies
        cash_syms_for_freq = list(cash_set)
        cash_freq_threshold = 0
        for cs in cash_syms_for_freq:
            cash_freq_threshold = max(cash_freq_threshold, int(freq.get(cs, 0)))
        if cash_freq_threshold > 0:
            kept_non_cash = []
            dropped = []
            for s, w in non_cash:
                if int(freq.get(s, 0)) < cash_freq_threshold:
                    # mark for drop
                    aggregated[s] = 0.0
                    dropped.append((s, freq.get(s, 0)))
                else:
                    kept_non_cash.append((s, w))
            non_cash = kept_non_cash
            # Optional log
            try:
                if getattr(algo, 'PRINT_AGGREGATION_DIAGNOSTICS', False) and dropped:
                    algo.Log("[freq_guard] dropped {} symbols below cash_freq_threshold={}: {}".format(
                        len(dropped), cash_freq_threshold,
                        ", ".join("{}(f={})".format(getattr(s,'Value',str(s)), f) for s, f in dropped)
                    ))
            except Exception:
                pass
    # Sort by aggregate weight desc
    non_cash.sort(key=lambda x: x[1], reverse=True)
    cash_pos.sort(key=lambda x: x[1], reverse=True)
    apply_unified = bool(getattr(algo, 'APPLY_UNIFIED_TOPN_SELECTION', False))
    if apply_unified:
        # Select TopN across cash and non-cash together by aggregated weight
        combined_sorted = sorted(((s, w) for s, w in aggregated.items() if w > 0), key=lambda x: x[1], reverse=True)
        keep_all = [s for s, _ in combined_sorted[:topn_cap]]
        keep_set = set(keep_all)
        # Zero out any positive symbols not in keep_set
        for sym, w in pos_items:
            if sym not in keep_set:
                aggregated[sym] = 0.0
    else:
        # Keep up to topn_cap non-cash, with optional portfolio-level cash boundary truncation
        keep_non_cash = [s for s, _ in non_cash[:topn_cap]]
        apply_portfolio_cash_boundary = bool(getattr(algo, 'APPLY_CASH_TRUNCATION_AT_PORTFOLIO', False))
        if apply_portfolio_cash_boundary and cash_filter_sid is not None:
            # Build combined positive order by aggregated weight desc
            combined_pos = sorted(pos_items, key=lambda x: x[1], reverse=True)
            # Consider the TopN window for boundary evaluation
            top_window = combined_pos[:topn_cap]
            cf_index = next((i for i, (s, _) in enumerate(top_window) if s == cash_filter_sid), None)
            if cf_index is not None:
                # Truncate non-cash to those before the cash boundary within the TopN window
                keep_non_cash = [s for s, _ in top_window[:cf_index] if s not in cash_set]
                try:
                    if getattr(algo, 'PRINT_CASH_BOUNDARY', False):
                        algo.Log(f"[cash_boundary_portfolio] TopN={topn_cap} boundary_index={cf_index} non_cash_kept={len(keep_non_cash)} cash_sid={getattr(cash_filter_sid,'Value',str(cash_filter_sid))}")
                except Exception:
                    pass
        leftover_slots = max(0, topn_cap - len(keep_non_cash))
        # Fill with top cash proxies if any leftover
        keep_cash = [s for s, _ in cash_pos[:leftover_slots]] if leftover_slots > 0 else []
        keep_set = set(keep_non_cash + keep_cash)
        # Zero out any positive symbols not in keep_set
        for sym, w in pos_items:
            if sym not in keep_set:
                aggregated[sym] = 0.0
    # Now recompute long_sum for normalization
    long_sum = sum(w for w in aggregated.values() if w > 0)
    if long_sum > 0:
        scale = target_long / long_sum
        for sym in list(aggregated.keys()):
            if aggregated[sym] > 0:
                aggregated[sym] *= scale

    # Establish target short mass from hedge multiplier for current regime
    try:
        regime = getattr(algo, 'regime', 'Risk-on')
    except Exception:
        regime = 'Risk-on'
    hedge_mult_for_mass = float(getattr(algo, 'HEDGE_MULTIPLIER_INDUSTRY_RISK_ON', 0.0)) if regime == 'Risk-on' else float(getattr(algo, 'HEDGE_MULTIPLIER_INDUSTRY_RISK_OFF', 0.0))
    target_short = -abs(hedge_mult_for_mass)

    # Concentration cap for shorts: keep at most botn_cap non-cash by most negative weights
    # Then rescale to a fixed target short "mass" (analogous to long mass normalization)
    neg_items = [(sym, w) for sym, w in aggregated.items() if w < 0 and sym not in cash_set]
    if neg_items:
        # Sort ascending so most negative first
        neg_items.sort(key=lambda x: x[1])
        # Respect configured number of shorts from Initialize
        cap = max(0, int(botn_cap))
        keep_neg_syms = set(s for s, _ in neg_items[:cap])
        # Zero out dropped shorts
        for sym, w in neg_items[cap:]:
            aggregated[sym] = 0.0
        # Recompute sum after cap
        neg_sum_after = sum(aggregated[sym] for sym in keep_neg_syms)
        # Rescale to target short mass if feasible
        if cap > 0 and neg_sum_after < 0 and target_short < 0 and neg_sum_after != target_short:
            short_scale = target_short / neg_sum_after
            for sym in keep_neg_syms:
                aggregated[sym] *= short_scale
    # One-line summary of target masses (long and short)
    try:
        if getattr(algo, 'PRINT_AGGREGATION_DIAGNOSTICS', False):
            algo.Log(f"[target_mass] target_long={target_long:.5f} target_short={target_short:.5f}")
    except Exception:
        pass
    # Optional diagnostics: print frequency-adjusted math behind targets
    try:
        if getattr(algo, 'PRINT_AGGREGATION_DIAGNOSTICS', False):
            # Reuse the symbol-frequency map f_i built during aggregation above
            # (count of subs in which each symbol received positive long weight)
            # Determine kept long set after concentration
            kept_longs = [sym for sym, w in aggregated.items() if w > 0]
            sum_f_kept = sum(freq.get(sym, 0) for sym in kept_longs) or 1
            step = 100.0 * float(target_long) / float(sum_f_kept)
            algo.Log("[agg_diag] target_long={:.5f} sum_f_kept={} step%={:.4f}".format(target_long, sum_f_kept, step))
            # Print each kept symbol's f_i and target%
            for sym in sorted(kept_longs, key=lambda s: (-aggregated[s], getattr(s, 'Value', str(s)))):
                tgt_pct = 100.0 * float(aggregated[sym])
                fi = freq.get(sym, 0)
                algo.Log("[agg_diag] {}: f_i={} -> Target={:.4f}%".format(getattr(sym, 'Value', str(sym)), fi, tgt_pct))
    except Exception:
        pass
    return aggregated
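
# Normalization arithmetic, with illustrative numbers (only long_leverage's
# 1.30 default is visible above; the rest are hypothetical): long_leverage=1.30
# and CASH_PERCENT=5 give target_long = 1.30 * (1 - 0.05) = 1.235. If the kept
# longs sum to 0.80, every positive weight is scaled by 1.235 / 0.80 = 1.54375.
# Shorts are rescaled separately so their sum equals -|hedge multiplier| for
# the current regime.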


def _compute_target_deltas_vs_current(algo: Any, target_weights: Dict[Any, float]) -> Dict[Any, int]:
    deltas: Dict[Any, int] = {}
    pv = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0)) or 0.0
    # Ensure we also liquidate symbols not present in target_weights
    full_targets: Dict[Any, float] = dict(target_weights)
    try:
        if hasattr(algo, 'Portfolio'):
            for sym, holding in algo.Portfolio.items():
                if getattr(holding, 'Invested', False) and sym not in full_targets:
                    full_targets[sym] = 0.0
    except Exception:
        pass
    for sym, tgt_wt in full_targets.items():
        sec = getattr(algo, 'Securities', {}).get(sym) if hasattr(algo, 'Securities') else None
        price = float(getattr(sec, 'Price', 0.0)) if sec else 0.0
        cur_qty = int(getattr(algo.Portfolio[sym], 'Quantity', 0)) if hasattr(algo, 'Portfolio') and sym in algo.Portfolio else 0
        tgt_qty = int(round((tgt_wt * pv) / price)) if price > 0 else 0
        delta = tgt_qty - cur_qty
        if delta != 0:
            deltas[sym] = delta
    return deltas
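
# Delta arithmetic, with hypothetical numbers: pv=100_000 and a 0.10 target
# weight on a 50.00 stock gives tgt_qty = round(0.10 * 100_000 / 50) = 200
# shares; holding 120 already yields a delta of +80 to buy. Symbols currently
# held but absent from target_weights receive an implicit 0.0 target above,
# so they are fully liquidated.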


def rebalance_moc(algo: Any) -> None:
    """Main rebalance flow: ensure minute subs → build per-sub weights → aggregate/normalize → MOC sells → MOC buys → audits.

    Assumes regime was computed during T-60 prewarm. If not present, defaults to 'Risk-on'.
    """
    # Do not submit any orders during warmup unless explicitly allowed
    if getattr(algo, 'IsWarmingUp', False) and not getattr(algo, 'ALLOW_TRADES_DURING_WARMUP', False):
        try:
            algo.Debug("[Rebalance] Skipping order submission during warmup")
        except Exception:
            pass
        return
    if not hasattr(algo, 'regime'):
        setattr(algo, 'regime', 'Risk-on')
    # Ensure minute subscriptions for trade set (union of targets + cash proxies)
    try:
        trade_set = set()
        for sub in getattr(algo, 'sub_portfolios', {}).values():
            trade_set.update(sub.get('topranked', []))
            trade_set.update(sub.get('bottomranked', []))
        cash_filter_sid = getattr(algo, 'CASH_FILTERING_SID', None)
        cash_syms_for_minute = [s for s in getattr(algo, 'CASH', []) if s != cash_filter_sid]
        if cash_filter_sid:
            trade_set.add(cash_filter_sid)
        trade_set.update(cash_syms_for_minute)
        if hasattr(algo, '_ensure_minute_for_trade_set'):
            algo._ensure_minute_for_trade_set(trade_set)
    except Exception:
        pass

    # Build per-sub weights and persist
    sub_weights = _build_per_sub_weights(algo)
    # Aggregate and normalize
    targets = _aggregate_and_normalize_targets(algo, sub_weights)
    # Persist aggregated targets for reporting (percent of total portfolio)
    try:
        setattr(algo, 'last_aggregated_targets', dict(targets))
    except Exception:
        pass
    # Compute deltas vs current
    deltas = _compute_target_deltas_vs_current(algo, targets)
    # Split into sells then buys, submit MOCs
    sells, buys = build_moc_basket(algo, deltas)
    submit_moc_basket(algo, sells, buys)

    # Optional audits
    if getattr(algo, 'audit_leverage', False):
        pv = float(getattr(algo.Portfolio, 'TotalPortfolioValue', 0.0)) or 1.0
        gross_long_value = sum(
            holding.HoldingsValue for symbol, holding in algo.Portfolio.items()
            if holding.Quantity > 0 and getattr(algo.Securities.get(symbol, None), 'Price', 0) > 0
        ) if hasattr(algo, 'Portfolio') else 0.0
        gross_short_value = sum(
            abs(holding.HoldingsValue) for symbol, holding in algo.Portfolio.items()
            if holding.Quantity < 0 and getattr(algo.Securities.get(symbol, None), 'Price', 0) > 0
        ) if hasattr(algo, 'Portfolio') else 0.0
        leverage_used = gross_long_value / pv if pv > 0 else 0.0
        algo.Log(f"[audit_leverage] Aggregated targets={len(targets)} deltas={len(deltas)} sells={len(sells)} buys={len(buys)}")
        algo.Log(f"[audit_leverage] Pre-close Gross Long Value: {gross_long_value:.2f}")
        algo.Log(f"[audit_leverage] Pre-close Gross Short Value: {gross_short_value:.2f}")
        algo.Log(f"[audit_leverage] Pre-close Long Leverage: {leverage_used:.2f}x")
"""(Removed) Use QC Optimization over SMA_PERIOD. Placeholder kept to avoid broken legacy imports."""
import math

def log_message(self, message):
    try:
        self.Log(message)
    except Exception:
        print(f"[LOG] {message}")

def debug_message(self, message):
    try:
        self.Debug(message)
    except Exception:
        print(f"[DEBUG] {message}")

def log_commission(self, orderEvent):
    try:
        self.Log(f"[COSTS] OrderID: {orderEvent.OrderId}, Symbol: {orderEvent.Symbol}, Quantity: {orderEvent.FillQuantity}, Commission: {orderEvent.FillFee}")
    except Exception:
        pass

def log_metrics_report(self, begin_value, end_value):
    try:
        self.Log(f"Metrics Report: BeginPortfolioValue={begin_value:.2f}, EndPortfolioValue={end_value:.2f}")
    except Exception:
        pass

def log_period_metrics(self, label, metrics):
    try:
        self.Log(f"{label},{','.join(str(m) for m in metrics)}")
    except Exception:
        pass

def log_audit(self, message):
    try:
        self.Log(f"[Audit] {message}")
    except Exception:
        pass

def log_sub_portfolio(self, message):
    try:
        self.Log(f"[SubPortfolio] {message}")
    except Exception:
        pass

def log_universe(self, univ):
    try:
        self.Log(f"[Universe] Active: {univ}")
    except Exception:
        pass

def log_cash_filter(self, cft, sid):
    try:
        self.Log(f"[CashFilter] Types={cft} SID={sid}")
    except Exception:
        pass

def log_ls_mode(self, ls_label):
    try:
        self.Log(f"[L/S] Active: {ls_label}")
    except Exception:
        pass

def log_normalization(self, mode_name):
    try:
        self.Log(f"[Normalization] Active: {mode_name}")
    except Exception:
        pass

def log_sub_ports(self, long_period, count_each_side, step, total):
    try:
        self.Log(f"[SubPorts] LONG_PERIOD={long_period}, count_each_side={count_each_side}, step={step}, total={total}")
    except Exception:
        pass

def log_sub_ports_range(self, min_lp, max_lp, long_period, step):
    try:
        self.Log(f"[SubPorts] Periods range: {min_lp}..{max_lp} (center={long_period}, step={step})")
    except Exception:
        pass

def log_costs(self, long_margin, short_borrow, stock_borrow):
    try:
        self.Log(f"[Costs] Active: LONG_MARGIN_SPREAD_BPS={long_margin}, SHORT_BORROW_SPREAD_BPS={short_borrow}, STOCK_BORROW_FEE_BPS={stock_borrow}")
    except Exception:
        pass
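
# Binding sketch (an assumption about how these `self`-taking helpers are
# wired; the actual attachment lives in main.py and may differ): bind them
# onto the algorithm class once, after which any instance can call
# self.log_audit("..."):
#
#     from QuantConnect.Algorithm import QCAlgorithm
#     for _fn in (log_message, debug_message, log_commission, log_audit,
#                 log_metrics_report, log_period_metrics, log_sub_portfolio):
#         setattr(QCAlgorithm, _fn.__name__, _fn)
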
from AlgorithmImports import *
try:  # noqa: SIM105 (intentional broad import guard)
    from QuantConnect import Resolution  # type: ignore
    from QuantConnect.Data import Slice  # type: ignore
    from QuantConnect.Orders import OrderEvent, OrderStatus  # type: ignore
    from QuantConnect.Algorithm import QCAlgorithm  # type: ignore
    try:
        from QuantConnect import Chart as _QCChart, Series as _QCSeries, SeriesType as _QCSeriesType  # type: ignore
    except Exception:
        _QCChart = _QCSeries = _QCSeriesType = None  # type: ignore
except Exception:
    # Local/offline lint environment may not have QuantConnect assemblies; ignore failures.
    pass

# Robust normalization enum alias for linters and runtime
try:
    from QuantConnect import DataNormalizationMode as QCDataNormalizationMode  # type: ignore
except Exception:
    try:
        QCDataNormalizationMode = DataNormalizationMode  # type: ignore
    except Exception:
        QCDataNormalizationMode = None  # type: ignore
from datetime import datetime, timedelta
import numpy as np
import math
import os, sys
try:
    _here = os.path.dirname(os.path.abspath(__file__))
except Exception:
    _here = "."
for _cand in (
    _here,                                   # .../qc-ready
    os.path.join(_here, "qc-ready"),        # .../qc-ready/qc-ready (uploaded parent)
    os.path.dirname(_here),                  # parent dir when main.py moved to root
):
    try:
        if os.path.isdir(os.path.join(_cand, "helpers")) and _cand not in sys.path:
            sys.path.insert(0, _cand)
    except Exception:
        pass
try:
    _p = _here
    for _ in range(5):  # walk up to 5 levels
        _p_next = os.path.dirname(_p)
        if not _p_next or _p_next == _p:
            break
        _p = _p_next
        # Case 1: parent contains helpers/
        if os.path.isdir(os.path.join(_p, "helpers")):
            if _p not in sys.path:
                sys.path.insert(0, _p)
            break
        # Case 2: parent/qc-ready contains helpers/
        _q = os.path.join(_p, "qc-ready")
        if os.path.isdir(os.path.join(_q, "helpers")):
            if _q not in sys.path:
                sys.path.insert(0, _q)
            break
except Exception:
    pass
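# Worked example of the path bootstrap above (paths are illustrative): if this
# file lives at /project/qc-ready/main.py with helpers/ alongside it, the first
# loop inserts /project/qc-ready into sys.path; if main.py was moved up to
# /project/ instead, the upward walk finds a parent (or parent/qc-ready)
# containing helpers/ and inserts that candidate.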

try:
    # If a different third-party 'helpers' module was pre-imported in the runtime,
    # and it doesn't expose our expected API, drop it so Python reloads from our sys.path entry.
    try:
        if 'helpers' in sys.modules and not hasattr(sys.modules.get('helpers'), 'configure_universe'):
            del sys.modules['helpers']
    except Exception:
        pass
    from helpers import (
        configure_universe,
        configure_logging_flags,
        _helpers_prewarm_minute_data,
        _helpers_rebalance,
        _helpers_print_monthly_metrics,
        _helpers_print_sub_portfolio_metrics,
        _helpers_print_sub_portfolio_audit,
        _helpers_print_positions,
        _helpers_update_sub_portfolio_navs,
        _helpers_Fred as Fred,
        FRED_DEFAULT_SERIES,
        LOG_OVERRIDES,
        _helpers_apply_margin_and_borrow_costs,
        _helpers_perf_configure,
        _helpers_perf_execution_snapshot,
        _helpers_perf_on_order_event,
        _helpers_perf_on_end_of_algorithm,
    )
except Exception:
    import importlib
    _H = None
    for _modname in ("project.helpers",):
        try:
            _H = importlib.import_module(_modname)
            break
        except Exception:
            _H = None
    if _H is None:
        # Final fallback: load helpers package directly from disk using the first candidate path discovered above
        try:
            import importlib.util as _ilu
            _helpers_dir = None
            for _cand in (
                _here,
                os.path.join(_here, "qc-ready"),
                os.path.dirname(_here),
            ):
                if os.path.isdir(os.path.join(_cand, "helpers")):
                    _helpers_dir = os.path.join(_cand, "helpers")
                    break
            if _helpers_dir is None:
                raise ImportError("helpers directory not found in candidate paths")
            _init_path = os.path.join(_helpers_dir, "__init__.py")
            spec = _ilu.spec_from_file_location("_local_helpers", _init_path)
            if spec is None or spec.loader is None:
                raise ImportError("cannot build spec for local helpers")
            _H = _ilu.module_from_spec(spec)
            spec.loader.exec_module(_H)
        except Exception as _e:
            raise ImportError("helpers/ missing. Place 'helpers/' next to main.py. Original: " + str(_e))
    # Map attributes to local names expected by the algorithm
    configure_universe = getattr(_H, 'configure_universe')
    configure_logging_flags = getattr(_H, 'configure_logging_flags')
    _helpers_prewarm_minute_data = getattr(_H, '_helpers_prewarm_minute_data')
    _helpers_rebalance = getattr(_H, '_helpers_rebalance')
    _helpers_print_monthly_metrics = getattr(_H, '_helpers_print_monthly_metrics')
    _helpers_print_sub_portfolio_metrics = getattr(_H, '_helpers_print_sub_portfolio_metrics')
    _helpers_print_sub_portfolio_audit = getattr(_H, '_helpers_print_sub_portfolio_audit')
    _helpers_print_positions = getattr(_H, '_helpers_print_positions')
    _helpers_update_sub_portfolio_navs = getattr(_H, '_helpers_update_sub_portfolio_navs')
    Fred = getattr(_H, '_helpers_Fred')
    FRED_DEFAULT_SERIES = getattr(_H, 'FRED_DEFAULT_SERIES')
    LOG_OVERRIDES = getattr(_H, 'LOG_OVERRIDES')
    _helpers_apply_margin_and_borrow_costs = getattr(_H, '_helpers_apply_margin_and_borrow_costs')
    # Performance-reporting delegates are referenced later (Initialize,
    # rebalance_moc, OnOrderEvent, OnEndOfAlgorithm); map them here too so the
    # fallback import path does not raise NameError.
    _helpers_perf_configure = getattr(_H, '_helpers_perf_configure')
    _helpers_perf_execution_snapshot = getattr(_H, '_helpers_perf_execution_snapshot')
    _helpers_perf_on_order_event = getattr(_H, '_helpers_perf_on_order_event')
    _helpers_perf_on_end_of_algorithm = getattr(_H, '_helpers_perf_on_end_of_algorithm')


class Main(QCAlgorithm):
    def Initialize(self):
        self.SetStartDate(2015, 11, 30)
        self.SetEndDate(2025, 12, 5)
        self.initialcash = 100000
        self.SetCash(self.initialcash)
        self.SetTimeZone("America/New_York")

        try:
            if QCDataNormalizationMode is not None:
                self.Settings.DataNormalizationMode = QCDataNormalizationMode.Adjusted
        except Exception:
            pass

        try:
            _orig_log = self.Log
            _orig_debug = self.Debug
            def _guarded_log(msg):
                try:
                    if getattr(self, 'Time', None) is not None and getattr(self, 'StartDate', None) is not None and self.Time >= self.StartDate:
                        _orig_log(msg)
                except Exception:
                    pass
            def _guarded_debug(msg):
                try:
                    if getattr(self, 'Time', None) is not None and getattr(self, 'StartDate', None) is not None and self.Time >= self.StartDate:
                        _orig_debug(msg)
                except Exception:
                    pass
            self.Log = _guarded_log
            self.Debug = _guarded_debug
        except Exception:
            pass
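        # Rationale for the guards above: Lean replays warmup bars before
        # StartDate, so gating Log/Debug on self.Time >= self.StartDate keeps
        # warmup replay from flooding the backtest log while leaving
        # post-start logging untouched.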

    
        self.long_leverage = 1.30
        # Set a safe default security initializer before subscriptions; refined after L/S parsing
        self.SetSecurityInitializer(lambda x: x.SetLeverage(4.0))

        # Core toggles
        self.USE_MINUTE_DATA = False
        self.ENABLE_DAILY_RANKING_SNAPSHOT = False
        self._t60_window_active = False
        self._pending_minute_cleanup = False
        self._minute_subscribed = set()
        self.R2 = True  # True for R^2 ranking; False for r
        # Funding/margin knobs (overridable via QC Parameters)
        self.STOCK_BORROW_FEE_BPS = 50
        self.LONG_MARGIN_SPREAD_BPS = 75
        self.SHORT_BORROW_SPREAD_BPS = 50
        self.cumulative_long_margin_cost = 0.0
        
        self.ALLOW_TRADES_DURING_WARMUP = False
        self.APPLY_COSTS = True  # flip to False to disable daily cost application
        try:
            _lm = self.GetParameter("LONG_MARGIN_SPREAD_BPS")
            if _lm is not None and str(_lm).strip() != "":
                self.LONG_MARGIN_SPREAD_BPS = int(float(str(_lm).strip()))
        except Exception:
            pass
        try:
            _atdw = self.GetParameter("ALLOW_TRADES_DURING_WARMUP")
            if _atdw is not None and str(_atdw).strip() != "":
                s = str(_atdw).strip().lower()
                self.ALLOW_TRADES_DURING_WARMUP = s in ("1", "true", "yes", "y", "on")
        except Exception:
            pass
        try:
            _sb = self.GetParameter("SHORT_BORROW_SPREAD_BPS")
            if _sb is not None and str(_sb).strip() != "":
                self.SHORT_BORROW_SPREAD_BPS = int(float(str(_sb).strip()))
        except Exception:
            pass
        try:
            _sf = self.GetParameter("STOCK_BORROW_FEE_BPS")
            if _sf is not None and str(_sf).strip() != "":
                self.STOCK_BORROW_FEE_BPS = int(float(str(_sf).strip()))
        except Exception:
            pass
        try:
            self.Log(
                f"[Costs] Active: LONG_MARGIN_SPREAD_BPS={int(getattr(self,'LONG_MARGIN_SPREAD_BPS',0))}, "
                f"SHORT_BORROW_SPREAD_BPS={int(getattr(self,'SHORT_BORROW_SPREAD_BPS',0))}, "
                f"STOCK_BORROW_FEE_BPS={int(getattr(self,'STOCK_BORROW_FEE_BPS',0))}"
            )
        except Exception:
            pass
        
        # Guard to ensure end-of-day updates run once per day (regardless of handler overloads)
        self._last_eod_date = None

        # Logging profile via QC Parameter (normalize and validate)
        _p = (self.GetParameter("LOG_PROFILE") or "summary").strip().lower()
        log_profile = _p if _p in ("summary", "detailed", "audit") else "summary"
        # Apply a profile, then enforce project-wide explicit overrides from helpers.config.LOG_OVERRIDES
        configure_logging_flags(self, profile=log_profile, overrides=LOG_OVERRIDES)

        # Optional: FRED series
        try:
            self.fedfunds = self.AddData(Fred, FRED_DEFAULT_SERIES, Resolution.Daily).Symbol
        except Exception:
            pass

        # Universe and benchmarks
        def _get_univ_param():
            for _k in ("UNIVERSE", "Universe", "universe"):
                try:
                    _v = self.GetParameter(_k)
                    if _v is not None and str(_v).strip() != "":
                        return _v
                except Exception:
                    continue
            return None
        _u = _get_univ_param()
        univ = (str(_u).strip().upper() if _u is not None and str(_u).strip() != "" else "EQUITY")
        if univ not in ("EQUITY", "BONDS"):
            univ = "EQUITY"
        configure_universe(self, universe_type=univ, use_minute=self.USE_MINUTE_DATA)
        # One-line confirmation of the selected universe
        try:
            self.Log(f"[Universe] Active: {univ}")
        except Exception:
            pass
        # Confirm core multiplier settings set by universe configuration (momentum/vol)
        try:
            lm = float(getattr(self, 'LONG_MOMENTUM_MULTIPLIER', 0.0) or 0.0)
            sm = float(getattr(self, 'SHORT_MOMENTUM_MULTIPLIER', 0.0) or 0.0)
            vm = float(getattr(self, 'VOL_MULTIPLIER', 0.0) or 0.0)
            self.Log(
                f"[Factors] Active: LONG_MOMENTUM_MULTIPLIER={lm:.2f}, "
                f"SHORT_MOMENTUM_MULTIPLIER={sm:.2f}, VOL_MULTIPLIER={vm:.2f}"
            )
        except Exception:
            pass
        # Optional QC Parameter override for CASH_FILTER_TYPES
        try:
            raw_cft = self.GetParameter("CASH_FILTER_TYPES")
            if raw_cft is not None and str(raw_cft).strip() != "":
                s = str(raw_cft).strip().lower()
                if s.startswith("[") and s.endswith("]"):
                    s = s[1:-1]
                parts = [p.strip() for p in (s.replace(" ", ",").split(",")) if p.strip() != ""]
                allowed = {"industry", "sector"}
                parsed = [p for p in parts if p in allowed]
                if str(raw_cft).strip() in ("[]", "[ ]"):
                    parsed = []
                # parsed is always a list here; an explicit "[]" disables filtering
                self.CASH_FILTER_TYPES = list(parsed)
        except Exception:
            pass
        # Optional QC Parameter override for CASH_FILTERING_SID (cash filter symbol)
        try:
            raw_sid = self.GetParameter("CASH_FILTER_SID")
            if raw_sid is not None and str(raw_sid).strip() != "":
                ticker = str(raw_sid).strip().upper()
                try:
                    sec = self.AddEquity(ticker, Resolution.Daily)
                    self.CASH_FILTERING_SID = sec.Symbol
                except Exception:
                    try:
                        # Fallback: create Symbol reference without subscription
                        self.CASH_FILTERING_SID = self.Symbol(ticker)
                    except Exception:
                        pass
        except Exception:
            pass
        # One-line confirmation of cash filter policy and SID
        try:
            cft = list(getattr(self, 'CASH_FILTER_TYPES', []))
            sid = getattr(self, 'CASH_FILTERING_SID', None)
            self.Log(f"[CashFilter] Types={cft} SID={sid}")
        except Exception:
            pass
        # Universe-specific leverage defaults and optional overrides
        # For BONDS: default to no leverage (1.00x long exposure). Allow QC Parameter BondLeverage to override.
        try:
            if getattr(self, 'FIXED_UNIVERSE_TYPE', 'EQUITY') == 'BONDS':
                self.long_leverage = 1.00
                _bl = self.GetParameter("BondLeverage")
                if _bl is not None and str(_bl).strip() != "":
                    try:
                        self.long_leverage = max(0.0, float(str(_bl).strip()))
                    except Exception:
                        pass
        except Exception:
            pass
        # Set exchange accessor used elsewhere
        try:
            self.exchange = self.Securities[self.jpm.Symbol].Exchange
        except Exception:
            pass
        # Default regime before first calculation to avoid None in logs
        if not hasattr(self, 'regime'):
            self.regime = 'Risk-on'

        self.LONG_PERIOD = 124
        self.SHORT_PERIOD = self.LONG_PERIOD // 2
        self.VOL_PERIOD = self.LONG_PERIOD // 2
        self.SMA_PERIOD = self.LONG_PERIOD // 2
        # Optional QC Parameter override for SMA period used by regime model
        try:
            _sma = self.GetParameter("SMA_PERIOD")
            if _sma is not None and str(_sma).strip() != "":
                v = int(float(str(_sma).strip()))
                if v > 1:
                    self.SMA_PERIOD = v
        except Exception:
            pass
        # (The active regime SMA period is confirmed at runtime by the scheduled regime-mode log.)
        # Build LONG_PERIODS symmetrically around LONG_PERIOD based on a simple "count/gap" config.
        # Example: SUB_PORT_RANGE="10/0" => 10 on each side, no gaps (step=1); includes LONG_PERIOD (total 21)
        #          SUB_PORT_RANGE="10/2" => 10 on each side, step=2 (e.g., ... 120,122,124,126,128 ...)
        # Manual defaults (definitive single source; QC Parameter is optional):
        # Edit these two lines to change the sub-portfolio layout.
        self.SUB_PORT_COUNT_EACH_SIDE = 10
        self.SUB_PORT_GAP = 2
        count_each_side = int(self.SUB_PORT_COUNT_EACH_SIDE)
        gap = int(self.SUB_PORT_GAP)
        try:
            _range_raw = self.GetParameter("SUB_PORT_RANGE")
            if _range_raw is not None and str(_range_raw).strip() != "":
                s = str(_range_raw).strip().lower().replace(" ", "")
                parts = s.split("/") if "/" in s else (s.split(",") if "," in s else [s])
                if len(parts) >= 1 and parts[0] != "":
                    count_each_side = max(0, int(float(parts[0])))
                if len(parts) >= 2 and parts[1] != "":
                    gap = max(0, int(float(parts[1])))
        except Exception:
            pass
        step = 1 if gap <= 0 else gap
        periods = []
        try:
            for k in range(-count_each_side, count_each_side + 1):
                p = int(self.LONG_PERIOD + k * step)
                if p > 0:
                    periods.append(p)
        except Exception:
            pass
        if periods:
            self.LONG_PERIODS = sorted(set(periods))
        else:
            self.LONG_PERIODS = [int(self.LONG_PERIOD)]
            try:
                self.Log(
                    f"[SubPorts][Fallback] Empty/invalid SUB_PORT_RANGE (count_each_side={count_each_side}, step={step}); using minimal fallback: [{int(self.LONG_PERIOD)}]"
                )
            except Exception:
                pass
        try:
            self.Log(f"[SubPorts] LONG_PERIOD={self.LONG_PERIOD}, count_each_side={count_each_side}, step={step}, total={len(self.LONG_PERIODS)}")
        except Exception:
            pass
        try:
            if getattr(self, 'LONG_PERIODS', None):
                lp = list(self.LONG_PERIODS)
                self.Log(f"[SubPorts] Periods range: {min(lp)}..{max(lp)} (center={self.LONG_PERIOD}, step={step})")
        except Exception:
            pass
        self.sub_portfolios = {}
        for period in self.LONG_PERIODS:
            self.sub_portfolios[period] = {
                "LONG_PERIOD": period,
                "SHORT_PERIOD": period // 2,
                "VOL_PERIOD": period // 2,
                "ranking": None,
                "topranked": [],
                "bottomranked": [],
            }
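        # Worked example with the manual defaults above (count_each_side=10,
        # gap=2, LONG_PERIOD=124): k runs -10..10 with step 2, so LONG_PERIODS
        # = [104, 106, ..., 144] -- 21 sub-portfolios, each deriving
        # SHORT_PERIOD = VOL_PERIOD = period // 2 (e.g. 124 -> 62).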
        n_sp = len(self.LONG_PERIODS) if self.LONG_PERIODS else 1
        initial_sub_value = self.initialcash / n_sp
        self.sub_portfolio_equity_history = {period: [] for period in self.LONG_PERIODS}
        self.sub_portfolio_nav = {period: float(initial_sub_value) for period in self.LONG_PERIODS}
        self.sub_portfolio_nav_history = {period: [float(initial_sub_value)] for period in self.LONG_PERIODS}
        self.sub_portfolio_nav_timeline = {period: [self.StartDate] for period in self.LONG_PERIODS}
        self.overview_nav = float(sum(self.sub_portfolio_nav.values())) if self.sub_portfolio_nav else float(self.initialcash)
        self.overview_nav_history = [float(self.overview_nav)]
        self.overview_nav_timeline = [self.StartDate]
        self.sub_portfolio_weights = {period: {} for period in self.LONG_PERIODS}
        self._prev_close = {}
        self.downside_capture_num = 0.0   # sum of strategy daily returns on benchmark down days
        self.downside_capture_den = 0.0   # sum of |benchmark daily returns| on benchmark down days
        self.downside_capture_days = 0    # count of benchmark down days included
        self.downside_capture_ratio = float('nan')
        self._bench_prev_close = None
        self._eom_liquidated = []
        self._last_eom_month = None
        self._ledger_opens = []       # [{'time': dt, 'symbol': str, 'fill_qty': float, 'fill_price': float}]
        self._ledger_closes = []      # [{'time': dt, 'symbol': str, 'fill_qty': float, 'fill_price': float}]
        self._ledger_eom_positions = []  # [{'time': dt, 'positions': [{'symbol':str,'qty':float,'weight':float,'close':float,'mv':float}], 'total_value': float}]
        self.SUB_CHART_SERIES_SAFE_LIMIT = 7
        existing_limit = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
        try:
            _lim_raw = self.GetParameter("SUB_CHART_SERIES_LIMIT")
            if _lim_raw is not None and str(_lim_raw).strip() != "":
                _lim_val = int(str(_lim_raw).strip())
                if _lim_val <= 0:
                    _lim_val = existing_limit or 8
                # Clamp to safe cap
                self.SUB_CHART_SERIES_LIMIT = max(1, min(_lim_val, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
            else:
                self.SUB_CHART_SERIES_LIMIT = max(1, min(existing_limit or 8, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
        except Exception:
            self.SUB_CHART_SERIES_LIMIT = max(1, min(existing_limit or 8, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
        existing_single = bool(getattr(self, 'SUB_CHART_SINGLE_PANEL', False))
        try:
            _single_raw = self.GetParameter("SUB_CHART_SINGLE_PANEL")
            if _single_raw is not None and str(_single_raw).strip() != "":
                _single = str(_single_raw).strip().lower()
                self.SUB_CHART_SINGLE_PANEL = _single in ("1", "true", "yes", "y", "on", "all")
            else:
                self.SUB_CHART_SINGLE_PANEL = existing_single
        except Exception:
            self.SUB_CHART_SINGLE_PANEL = existing_single

        # Performance reporting: parse params, init ledgers, schedule EOM snapshot
        _helpers_perf_configure(self)

        try:
            # Only run when Chart types are available; otherwise rely on Plot() to auto-create series
            if (_QCChart is not None) and (_QCSeries is not None) and (_QCSeriesType is not None):
                n_sp = len(self.LONG_PERIODS) if self.LONG_PERIODS else 0
                series_limit_raw = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
                series_limit = max(1, min(series_limit_raw, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
                periods_sorted = sorted(self.LONG_PERIODS)
                if getattr(self, 'SUB_CHART_SINGLE_PANEL', False):
                    for base in ("SubNAV", "SubDrawdown"):
                        try:
                            ch = _QCChart(base)
                            # Seed with all series so later Plot calls are guaranteed to appear
                            for p in periods_sorted:
                                ch.AddSeries(_QCSeries(f"SP-{p}", _QCSeriesType.Line, 0))
                            self.AddChart(ch)
                        except Exception:
                            pass
                else:
                    windows = max(1, int((n_sp + series_limit - 1) // series_limit))
                    for i in range(windows):
                        suffix = "" if i == 0 else f"-{i+1}"
                        start = i * series_limit
                        end = min(start + series_limit, n_sp)
                        window_periods = periods_sorted[start:end]
                        for base in ("SubNAV", "SubDrawdown"):
                            try:
                                ch = _QCChart(f"{base}{suffix}")
                                for p in window_periods:
                                    ch.AddSeries(_QCSeries(f"SP-{p}", _QCSeriesType.Line, 0))
                                self.AddChart(ch)
                            except Exception:
                                pass
        except Exception:
            pass
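        # Chart layout example: with 21 sub-portfolios and the safe series cap
        # of 7, windows = ceil(21 / 7) = 3, yielding paired panels SubNAV /
        # SubDrawdown, SubNAV-2 / SubDrawdown-2 and SubNAV-3 / SubDrawdown-3,
        # each seeded with up to 7 "SP-<period>" line series.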

        def _parse_bool(val):
            try:
                s = str(val).strip().lower()
                return s in ("1", "true", "yes", "y", "on")
            except Exception:
                return False
        # DIAG_BOUNDARY_META
        try:
            _dbm = self.GetParameter("DIAG_BOUNDARY_META")
            if _dbm is not None and str(_dbm).strip() != "":
                self.DIAG_BOUNDARY_META = _parse_bool(_dbm)
        except Exception:
            pass
        # DIAG_RANKING_BOUNDARY
        try:
            _drb = self.GetParameter("DIAG_RANKING_BOUNDARY")
            if _drb is not None and str(_drb).strip() != "":
                self.DIAG_RANKING_BOUNDARY = _parse_bool(_drb)
        except Exception:
            pass
        # RANKING_BOUNDARY_K (default handled in helpers.ranking)
        try:
            _kb = self.GetParameter("RANKING_BOUNDARY_K")
            if _kb is not None and str(_kb).strip() != "":
                self.RANKING_BOUNDARY_K = int(str(_kb).strip())
        except Exception:
            pass
        # DIAG_SERIES_DUMP and optional symbol value or separate symbol parameter
        try:
            _dsd = self.GetParameter("DIAG_SERIES_DUMP")
            _dsym = None
            # Prefer explicit symbol parameter when present
            try:
                _dsym_raw = self.GetParameter("DIAG_SERIES_DUMP_SYMBOL")
                if _dsym_raw is not None and str(_dsym_raw).strip() != "":
                    _dsym = str(_dsym_raw).strip().upper()
            except Exception:
                _dsym = None
            if _dsd is not None and str(_dsd).strip() != "":
                s = str(_dsd).strip()
                # Boolean-like values toggle the dump explicitly; any other
                # value is treated as a ticker (fallback when QC blocks
                # DIAG_SERIES_DUMP_SYMBOL): enable the dump and filter to it
                if _parse_bool(s):
                    self.DIAG_SERIES_DUMP = True
                elif s.lower() in ("0", "false", "no", "n", "off"):
                    self.DIAG_SERIES_DUMP = False
                else:
                    self.DIAG_SERIES_DUMP = True
                    self.DIAG_SERIES_DUMP_SYMBOL = s.upper()
            # If only symbol param was provided, turn on dump and set symbol
            if _dsym is not None and _dsym != "":
                self.DIAG_SERIES_DUMP = True
                self.DIAG_SERIES_DUMP_SYMBOL = _dsym
        except Exception:
            pass

        # Regime configuration: always period-based SMA using SMA_PERIOD for the EQ/FI ratio
        try:
            sma_p = int(getattr(self, 'SMA_PERIOD', 50))
            self.regime_mode_msg = f"[Regime] Active: period SMA over {sma_p} bars"
            self._regime_mode_logged = False
            def _log_regime_mode():
                if getattr(self, '_regime_mode_logged', False):
                    return
                try:
                    self.Log(self.regime_mode_msg)
                except Exception:
                    pass
                # Also confirm the active data normalization mode once logs are enabled
                try:
                    mode = getattr(self.Settings, 'DataNormalizationMode', None)
                    mode_name = getattr(mode, 'name', None)
                    if not mode_name:
                        mode_name = str(mode)
                    self.Log(f"[Normalization] Active: {mode_name}")
                except Exception:
                    pass
                self._regime_mode_logged = True
            self.Schedule.On(
                self.DateRules.EveryDay(self.spy),
                self.TimeRules.AfterMarketOpen(self.spy, 1),
                _log_regime_mode
            )
        except Exception:
            pass
        # QC Optimizer should be used for SMA period testing; passive helper removed

        try:
            plotted_count = 0
            series_limit_raw = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
            series_limit = max(1, min(series_limit_raw, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
            for period in sorted(self.LONG_PERIODS):
                nav_val = float(self.sub_portfolio_nav.get(period, initial_sub_value))
                if getattr(self, 'SUB_CHART_SINGLE_PANEL', False):
                    suffix = ""
                else:
                    chart_index = plotted_count // series_limit
                    suffix = "" if chart_index == 0 else f"-{chart_index+1}"
                if nav_val > 0:
                    self.Plot(f"SubNAV{suffix}", f"SP-{period}", math.log(nav_val))
                self.Plot(f"SubDrawdown{suffix}", f"SP-{period}", 0.0)
                plotted_count += 1
        except Exception:
            pass

        self.INDUSTRY_TOPN = 10
        self.SECTOR_TOPN = 10
        self.INDUSTRY_BOTN = self.INDUSTRY_TOPN // 2
        self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.00
        self.HEDGE_MULTIPLIER_INDUSTRY_RISK_OFF = 0.00
        if not hasattr(self, 'LongShortMode'):
            self.LongShortMode = "off"
        try:
            # Base defaults from config
            ls_mode = str(getattr(self, 'LongShortMode', 'off')).strip().lower()
            long_lev_default = float(getattr(self, 'LongLeverage', 1.30))
            short_pct_default = float(getattr(self, 'ShortMassPercent', 30.0))
            short_count_default = getattr(self, 'ShortCount', None)
            # QC parameter overrides
            _lsm = self.GetParameter("LongShortMode")
            if _lsm is not None and str(_lsm).strip() != "":
                ls_mode = str(_lsm).strip().lower()
            _ll = self.GetParameter("LongLeverage")
            if _ll is not None and str(_ll).strip() != "":
                try:
                    long_lev_default = float(str(_ll).strip())
                except Exception:
                    pass
            _sp = self.GetParameter("ShortMassPercent")
            if _sp is not None and str(_sp).strip() != "":
                try:
                    short_pct_default = float(str(_sp).strip())
                except Exception:
                    pass
            _sc = self.GetParameter("ShortCount")
            if _sc is not None and str(_sc).strip() != "":
                try:
                    short_count_default = int(str(_sc).strip())
                except Exception:
                    pass
            # Normalize common aliases and allow generic pattern like "150-50" or "150/50"
            if ls_mode in ("130-30", "130/30", "ls", "longshort", "l/s"):
                ls_mode = "130-30"
            elif ls_mode in ("100-30", "100/30"):
                ls_mode = "100-30"
            elif ls_mode in ("none", "off", "long-only", "longonly"):
                ls_mode = "off"
            else:
                # Try to parse pattern X-Y or X/Y where X,Y are percents (e.g., 150-50)
                try:
                    sep = "-" if "-" in ls_mode else ("/" if "/" in ls_mode else None)
                    if sep is not None:
                        parts = [p.strip() for p in ls_mode.split(sep) if p is not None]
                        if len(parts) == 2 and parts[0].replace(".", "", 1).isdigit() and parts[1].replace(".", "", 1).isdigit():
                            l_val = float(parts[0]) / 100.0
                            s_val = float(parts[1]) / 100.0
                            if l_val > 0.0 and s_val >= 0.0:
                                ls_mode = "custom"
                                long_lev_default = l_val
                                short_pct_default = s_val * 100.0
                except Exception:
                    pass
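            # Example of the generic pattern: LongShortMode="150-50" parses to
            # l_val=1.50 and s_val=0.50, so ls_mode becomes "custom" with
            # long_leverage=1.50 and a 50% short mass
            # (HEDGE_MULTIPLIER_INDUSTRY_RISK_ON=0.50).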
            # Apply
            if ls_mode == "130-30":
                self.long_leverage = 1.30
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.30
            elif ls_mode == "100-30":
                self.long_leverage = 1.00
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.30
            elif ls_mode == "custom":
                # Use provided values
                self.long_leverage = max(0.0, float(long_lev_default))
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = max(0.0, float(short_pct_default) / 100.0)
            else:  # off
                # Stay long-only, keep configured leverage
                self.long_leverage = float(getattr(self, 'long_leverage', 1.30))
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.00
            # Determine short count for Risk-on
            if short_count_default is not None:
                try:
                    self.INDUSTRY_BOTN = max(0, int(short_count_default))
                except Exception:
                    self.INDUSTRY_BOTN = self.INDUSTRY_TOPN // 2
            else:
                self.INDUSTRY_BOTN = self.INDUSTRY_TOPN // 2
            # Risk-off shorts remain disabled by default
            self.HEDGE_MULTIPLIER_INDUSTRY_RISK_OFF = 0.00
            # One-time L/S mode log
            try:
                ls_label = ls_mode
                if ls_mode == "custom":
                    ls_label = f"custom (L={self.long_leverage:.2f}, S={self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON:.2f}, botn={self.INDUSTRY_BOTN})"
                elif ls_mode == "130-30":
                    ls_label = f"130/30 (botn={self.INDUSTRY_BOTN})"
                elif ls_mode == "100-30":
                    ls_label = f"100/30 (botn={self.INDUSTRY_BOTN})"
                else:
                    ls_label = f"off (long-only @ L={self.long_leverage:.2f})"
                self.Log(f"[L/S] Active: {ls_label}")
                # Explicit one-liner and runtime stat when L/S is off
                if ls_mode == "off":
                    try:
                        self.Log(f"Long-only at {self.long_leverage:.2f}x")
                        self.SetRuntimeStatistic("Long Exposure", f"{self.long_leverage:.2f}x")
                    except Exception:
                        pass
            except Exception:
                pass
        except Exception:
            # Fall back to safe long-only behavior on any parsing error
            self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.00
        
        # After Long/Short parsing, enforce 4.0x leverage for all universes and scenarios
        try:
            desired_sec_lev = 4.0
            # Ensure future subscriptions inherit this leverage
            self.SetSecurityInitializer(lambda x, lev=desired_sec_lev: x.SetLeverage(lev))
            # Align currently-added securities as well
            try:
                for sec in list(getattr(self, 'Securities', {}).Values):
                    try:
                        sec.SetLeverage(desired_sec_lev)
                    except Exception:
                        continue
            except Exception:
                pass
        except Exception:
            pass
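        # Note: the initializer above supersedes the default one set earlier
        # in Initialize. Under Lean's standard security margin model, 4.0x
        # leverage corresponds to a 25% initial margin requirement; actual
        # gross exposure remains governed by long_leverage and the hedge
        # multipliers.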
        
        # After L/S parsing, set benchmark based on shorts permission for EQUITY universe
        try:
            utype = getattr(self, 'FIXED_UNIVERSE_TYPE', 'EQUITY')
            if utype == 'EQUITY':
                has_shorts = float(getattr(self, 'HEDGE_MULTIPLIER_INDUSTRY_RISK_ON', 0.0)) > 0.0
                if has_shorts and hasattr(self, 'csm'):
                    self.SetBenchmark(self.csm)
                    self.BENCHMARK_SYMBOL = self.csm
                elif hasattr(self, 'iwv'):
                    self.SetBenchmark(self.iwv)
                    self.BENCHMARK_SYMBOL = self.iwv
        except Exception:
            pass
        # One-line confirmation of the designated benchmark
        try:
            bm = getattr(self, 'BENCHMARK_SYMBOL', None)
            bm_str = getattr(bm, 'Value', str(bm)) if bm is not None else 'None'
            self.Log(f"[Benchmark] Active: {bm_str}")
        except Exception:
            pass
        self.CASH_PERCENT = 0.25  # consumed as percent points by OnOrderEvent's (100 - x)/100 computation, i.e. a 0.25% cash buffer
        self.PERCENTAGE_MOMENTUM = True
        self.VOL_INDUSTRY = 'R2'
        self.VOL_SECTOR = 'R2'
        self.RANKING_METRIC = "R2"

        # Warmup
        self.SetWarmUp(max(self.LONG_PERIODS) * 2, Resolution.Daily)
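        # With the default 10/2 sub-portfolio layout, max(LONG_PERIODS) = 144,
        # so warmup spans 288 daily bars -- enough history for the longest
        # lookback plus its derived SHORT/VOL/SMA windows.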

        # Schedules
        # T-60 prewarm: compute regime and enable minute subs for trade set
        self.Schedule.On(
            self.DateRules.MonthEnd(self.spy),
            self.TimeRules.BeforeMarketClose(self.spy, 60),
            self.prewarm_minute_data
        )
        # T-30 rebalance: submit MOC orders
        self.Schedule.On(
            self.DateRules.MonthEnd(self.spy),
            self.TimeRules.BeforeMarketClose(self.spy, 30),
            self.rebalance_moc
        )
        # T-0: monthly metrics snapshot (NAV-based) and cleanup flag
        self.Schedule.On(
            self.DateRules.MonthEnd(self.spy),
            self.TimeRules.BeforeMarketClose(self.spy, 0),
            self.print_monthly_metrics
        )
        # Next morning cleanup hook (optional)
        self.Schedule.On(
            self.DateRules.EveryDay(self.spy),
            self.TimeRules.AfterMarketOpen(self.spy, 5),
            self._maybe_cleanup_minute_subs
        )

        # Optional month-end prints driven by flags
        should_print_positions = (
            getattr(self, 'flagCommentRank', None) == 1 or
            getattr(self, 'POSITION_REPORTING_FLAG', False) or
            getattr(self, 'abbreviated_positions_print', False) or
            getattr(self, 'sub_port_positions', False)
        )
        if should_print_positions:
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                self.print_positions
            )
        # Independent sub-portfolio audit print at T-0, controlled by sub_port_audit flag
        if getattr(self, 'sub_port_audit', False):
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                self.print_sub_portfolio_audit
            )
        # Sub-portfolio metrics (CAGR, MaxDD, Alpha vs benchmark) will be printed only at end of algorithm
        if getattr(self, 'flagCommentRank', None) == 0:
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                lambda: self.Log(f"month-end equity: {self.Portfolio.TotalPortfolioValue}")
            )
        if getattr(self, 'PRINT_CLOSED_POSITIONS', False):
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                self.print_closed_positions
            )
        # EOM snapshot scheduled in helpers.performance.configure_perf_reporting

    # --- Data and EOD hooks ---
    def OnData(self, data: Slice):
        try:
            if hasattr(self, 'fedfunds') and self.fedfunds in data and hasattr(data[self.fedfunds], 'Value'):
                val = data[self.fedfunds].Value
                if val is not None:
                    self.fedfunds_rate = float(val) / 100.0
        except Exception:
            pass

    def _on_end_of_day_core(self):
        # Run at most once per day even if multiple symbols trigger EOD
        try:
            cur_date = None
            try:
                cur_date = self.Time.date()
            except Exception:
                cur_date = getattr(self, 'Time', None)
            if cur_date is not None and getattr(self, '_last_eod_date', None) == cur_date:
                return
            self._last_eod_date = cur_date
        except Exception:
            pass
        # NAV/Drawdown updates and margin cost application
        try:
            self._update_sub_portfolio_navs()
        except Exception:
            pass
        try:
            if getattr(self, 'APPLY_COSTS', True):
                _helpers_apply_margin_and_borrow_costs(self)
                # Daily runtime stats suppressed; emit funding summary at EndOfAlgorithm only
        except Exception:
            pass

    # Symbol overload EOD (preferred by Lean). Gate execution to SPY only.
    def OnEndOfDay(self, symbol):
        try:
            # Require SPY presence
            if not hasattr(self, 'spy') or getattr(self, 'spy', None) is None:
                return
            # Only run when SPY triggers
            if symbol != getattr(self, 'spy', None):
                return
            # Ensure SPY has data (indicates a trading day)
            spy_sym = getattr(self, 'spy', None)
            sec = self.Securities[spy_sym] if (hasattr(self, 'Securities') and spy_sym in self.Securities) else None
            if not sec or getattr(sec, 'Price', 0) <= 0:
                return
        except Exception:
            return
        try:
            self._on_end_of_day_core()
        except Exception:
            pass

    # --- Thin wrappers over helpers ---
    def _can_trade_now(self) -> bool:
        try:
            if getattr(self, 'IsWarmingUp', False) and not getattr(self, 'ALLOW_TRADES_DURING_WARMUP', False):
                return False
            if getattr(self, '_suspend_trading', False):
                return False
            return True
        except Exception:
            # Be permissive if environment lacks flags
            return True

    def prewarm_minute_data(self):
        try:
            _helpers_prewarm_minute_data(self)
        except Exception as e:
            self.Debug(f"[prewarm_minute_data] error: {e}")

    def rebalance_moc(self):
        # Centralized guard before submitting orders
        if not self._can_trade_now():
            try:
                self.Debug("[Rebalance] Trading gated (warmup/suspended); skipping order submission")
            except Exception:
                pass
            return
        try:
            _helpers_rebalance(self)
        except Exception as e:
            self.Debug(f"[rebalance_moc] error: {e}")
        # Execution snapshot (post MOC decision)
        _helpers_perf_execution_snapshot(self)

    def print_monthly_metrics(self):
        try:
            _helpers_print_monthly_metrics(self)
        except Exception as e:
            self.Debug(f"[print_monthly_metrics] error: {e}")

    # Warmup lifecycle hook
    def OnWarmupFinished(self):
        try:
            self.warmup_done = True
            self.Debug("[Init] Warmup finished; trading unlocked.")
        except Exception:
            pass

    # Minute subs hook referenced by helpers.rebalance
    def _ensure_minute_for_trade_set(self, symbols):
        if not getattr(self, '_t60_window_active', False):
            return
        if not hasattr(self, '_minute_subscribed'):
            self._minute_subscribed = set()
        for s in symbols:
            try:
                if s not in self._minute_subscribed:
                    # Add an additional minute subscription for pricing during the T-60 window
                    self.AddEquity(s.Value, Resolution.Minute)
                    self._minute_subscribed.add(s)
            except Exception as e:
                try:
                    self.Debug(f"[minute-sub] Failed for {getattr(s,'Value',str(s))}: {e}")
                except Exception:
                    pass

    def _maybe_cleanup_minute_subs(self):
        if getattr(self, '_pending_minute_cleanup', False):
            # In case you track per-symbol minute flags, clear them here
            self._minute_subscribed = set()
            self._pending_minute_cleanup = False

    # Position reporting and abbreviated view
    def print_positions(self):
        try:
            _helpers_print_positions(self)
        except Exception as e:
            try:
                self.Debug(f"[print_positions] error: {e}")
            except Exception:
                pass

    # Daily NAV updates run once per day from the EOD hook via
    # _update_sub_portfolio_navs below; print_positions only reports.
    def _update_sub_portfolio_navs(self):
        # Collect all symbols referenced by current weights
        all_syms = set()
        for w in getattr(self, 'sub_portfolio_weights', {}).values():
            all_syms.update(w.keys())
        # Current closes
        cur_close = {}
        for sym in list(all_syms):
            try:
                if sym in self.Securities and self.Securities[sym].Price and self.Securities[sym].Price > 0:
                    cur_close[sym] = float(self.Securities[sym].Price)
            except Exception:
                continue
        # Daily returns when possible
        sym_returns = {}
        if getattr(self, '_prev_close', {}):
            for sym, px in cur_close.items():
                prev = self._prev_close.get(sym, None)
                if prev and prev > 0:
                    sym_returns[sym] = (px - prev) / prev
        # Update each sub-portfolio
        plotted_count = 0
        series_limit_raw = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
        series_limit = max(1, min(series_limit_raw, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
        for period in sorted(getattr(self, 'sub_portfolio_weights', {}).keys()):
            weights = getattr(self, 'sub_portfolio_weights', {}).get(period, {})
            w_ret = 0.0
            for sym, w in weights.items():
                r = sym_returns.get(sym, None)
                if r is not None:
                    w_ret += float(w) * float(r)
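            # NAV recursion: NAV_t = NAV_{t-1} * (1 + sum_i w_i * r_i). E.g.
            # weights {A: 0.6, B: 0.4} with r_A = +1.0% and r_B = -0.5% give
            # w_ret = 0.004, so a 100,000 sub-NAV becomes 100,400.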
            self.sub_portfolio_nav[period] = float(self.sub_portfolio_nav.get(period, 0.0)) * (1.0 + w_ret)
            nav_val = self.sub_portfolio_nav[period]
            self.sub_portfolio_nav_history[period].append(nav_val)
            if hasattr(self, 'sub_portfolio_nav_timeline'):
                self.sub_portfolio_nav_timeline[period].append(self.Time)
            # Plot NAV and drawdown series (NAV in log, DD as percent)
            try:
                hist = self.sub_portfolio_nav_history[period]
                if hist:
                    peak = max(hist)
                    dd_pct = ((peak - nav_val) / peak * 100.0) if peak > 0 else 0.0
                    # Split across multiple charts if exceeding series limit unless single panel is enabled
                    if getattr(self, 'SUB_CHART_SINGLE_PANEL', False):
                        suffix = ""
                    else:
                        chart_index = plotted_count // series_limit
                        suffix = "" if chart_index == 0 else f"-{chart_index+1}"
                    # Log NAV: use natural log; guard for non-positive values
                    if nav_val is not None and nav_val > 0:
                        log_nav = math.log(float(nav_val))
                        # Keep original series name to ensure chart visibility
                        self.Plot(f"SubNAV{suffix}", f"SP-{period}", log_nav)
                    # Sub-portfolio drawdown back to percent scale (negative values for deeper drawdowns)
                    self.Plot(f"SubDrawdown{suffix}", f"SP-{period}", -dd_pct)
                    plotted_count += 1
            except Exception:
                pass
        # Overview totals
        try:
            total_nav = sum(self.sub_portfolio_nav.values()) if hasattr(self, 'sub_portfolio_nav') else 0.0
            self.overview_nav = float(total_nav)
            if hasattr(self, 'overview_nav_history'):
                self.overview_nav_history.append(self.overview_nav)
            if hasattr(self, 'overview_nav_timeline'):
                self.overview_nav_timeline.append(self.Time)
            if getattr(self, 'overview_nav_history', None):
                peak_total = max(self.overview_nav_history)
                total_dd_pct = ((peak_total - self.overview_nav) / peak_total * 100.0) if peak_total > 0 else 0.0
                # Log-scaled overview NAV and drawdown
                if self.overview_nav > 0:
                    self.Plot("Overview", "TotalNAV (log)", math.log(float(self.overview_nav)))
                dd_frac_total = float(total_dd_pct) / 100.0 if total_dd_pct is not None else 0.0
                if dd_frac_total >= 0.0 and dd_frac_total < 1.0:
                    log_dd_total = -math.log(1.0 - dd_frac_total) if dd_frac_total > 0 else 0.0
                    self.Plot("Overview", "TotalDrawdown (logDD)", -100.0 * log_dd_total)
            # Update downside capture statistics against current benchmark
            try:
                bench = getattr(self, 'BENCHMARK_SYMBOL', None)
                bench_close = None
                if bench is not None and bench in self.Securities and self.Securities[bench].Price and self.Securities[bench].Price > 0:
                    bench_close = float(self.Securities[bench].Price)
                # Compute daily returns for strategy and benchmark
                strat_ret = None
                if hasattr(self, 'overview_nav_history') and len(self.overview_nav_history) >= 2:
                    prev_nav = float(self.overview_nav_history[-2])
                    cur_nav = float(self.overview_nav_history[-1])
                    if prev_nav > 0:
                        strat_ret = (cur_nav / prev_nav) - 1.0
                bench_ret = None
                if bench_close is not None and getattr(self, '_bench_prev_close', None):
                    prev_bench = float(self._bench_prev_close)
                    if prev_bench > 0:
                        bench_ret = (bench_close / prev_bench) - 1.0
                # Accumulate downside capture when benchmark is down
                if bench_ret is not None and bench_ret < 0 and strat_ret is not None:
                    self.downside_capture_num += float(strat_ret)
                    self.downside_capture_den += abs(float(bench_ret))
                    self.downside_capture_days += 1
                # Compute and plot running downside capture percentage
                if self.downside_capture_den > 0:
                    self.downside_capture_ratio = self.downside_capture_num / self.downside_capture_den
                    self.Plot("Overview", "DownsideCapture (%)", 100.0 * float(self.downside_capture_ratio))
                # Update previous benchmark close
                if bench_close is not None:
                    self._bench_prev_close = bench_close
                # Persist last day's returns for regime candidate alpha computation
                try:
                    if strat_ret is not None:
                        self._last_day_strat_ret = float(strat_ret)
                    else:
                        self._last_day_strat_ret = None
                    if bench_ret is not None:
                        self._last_day_bench_ret = float(bench_ret)
                    else:
                        self._last_day_bench_ret = None
                except Exception:
                    pass
            except Exception:
                pass
        except Exception:
            pass
        if cur_close:
            self._prev_close = dict(cur_close)

    # --- Sub-portfolio metrics print (CAGR, MaxDD, Alpha) ---
    def print_sub_portfolio_metrics(self):
        try:
            _helpers_print_sub_portfolio_metrics(self)
        except Exception as e:
            try:
                self.Debug(f"[print_sub_portfolio_metrics] error: {e}")
            except Exception:
                pass

    # --- Sub-portfolio audit print ---
    def print_sub_portfolio_audit(self):
        try:
            _helpers_print_sub_portfolio_audit(self)
        except Exception as e:
            try:
                self.Debug(f"[print_sub_portfolio_audit] error: {e}")
            except Exception:
                pass

    # --- End-of-month performance snapshot moved to helpers.performance ---

    # --- Order event logging with margin deficit parse ---
    def OnOrderEvent(self, orderEvent: OrderEvent):
        try:
            if orderEvent.Status == OrderStatus.Invalid:
                order = self.Transactions.GetOrderById(orderEvent.OrderId)
                symbol = getattr(order, 'Symbol', None)
                shares = getattr(order, 'Quantity', None)
                initial_margin = None
                free_margin = None
                margin_deficit = None
                margin_deficit_pct = None
                msg = getattr(orderEvent, 'Message', None)
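                # The rejection text is assumed to embed segments such as
                # "..., Initial Margin: 12345.67, Free Margin: 10000.00, ...";
                # the parse below recovers both numbers, so the deficit in
                # this example would be 12345.67 - 10000.00 = 2345.67.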
                if isinstance(msg, str) and "Initial Margin:" in msg and "Free Margin:" in msg:
                    try:
                        parts = msg.split(",")
                        for part in parts:
                            if "Initial Margin:" in part:
                                initial_margin = float(part.split("Initial Margin:")[-1].strip())
                            if "Free Margin:" in part:
                                free_margin = float(part.split("Free Margin:")[-1].strip())
                        if initial_margin is not None and free_margin is not None:
                            margin_deficit = initial_margin - free_margin
                            n = len(getattr(self, 'sub_portfolios', {})) or 1
                            pv = float(self.Portfolio.TotalPortfolioValue) * float(getattr(self, 'long_leverage', 1.0)) * (100.0 - float(getattr(self, 'CASH_PERCENT', 0.0))) / 100.0
                            sub_portfolio_value = pv / n if n > 0 else pv
                            margin_deficit_pct = (margin_deficit / sub_portfolio_value * 100.0) if sub_portfolio_value else None
                    except Exception:
                        pass
                self.Log(
                    f"Order Error: ids: [{orderEvent.OrderId}], Symbol: {symbol.Value if hasattr(symbol,'Value') else str(symbol)}, "
                    f"Shares: {shares}, Insufficient buying power to complete order (Value:[{orderEvent.FillPrice}]), Reason: {orderEvent.Message}"
                )
                if margin_deficit is not None and margin_deficit_pct is not None:
                    self.Log(f"Margin Deficit: ${margin_deficit:.2f} ({margin_deficit_pct:.2f}% of sub-portfolio value)")
                return
        except Exception:
            pass

        # Delegate lifecycle tracking to helper
        try:
            _helpers_perf_on_order_event(self, orderEvent)
        except Exception:
            pass

    # --- Regime model used by helpers.data_sources at T-60 ---
    def calculate_regime(self):
        try:
            eq = getattr(self, 'REGIME_EQ', None)
            fi = getattr(self, 'REGIME_FI', None)
            if eq is None or fi is None:
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            end_time = self.Time
            n = int(getattr(self, 'SMA_PERIOD', 50))
            lookback_days = max(1, n * 2)
            raw_hist_eq = self.History([eq], end_time - timedelta(days=lookback_days), end_time, Resolution.Daily)
            raw_hist_fi = self.History([fi], end_time - timedelta(days=lookback_days), end_time, Resolution.Daily)
            if raw_hist_eq is None or raw_hist_eq.empty or raw_hist_fi is None or raw_hist_fi.empty:
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            try:
                eq_close = raw_hist_eq.loc[eq]['close'] if 'close' in raw_hist_eq.columns else raw_hist_eq.loc[eq].close
                fi_close = raw_hist_fi.loc[fi]['close'] if 'close' in raw_hist_fi.columns else raw_hist_fi.loc[fi].close
            except Exception:
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            # Drop today's partial bar to avoid intraday-vs-close inconsistencies
            try:
                today = self.Time.date()
                eq_close = eq_close[eq_close.index.date < today]
                fi_close = fi_close[fi_close.index.date < today]
            except Exception:
                pass
            # Keep exactly last n completed closes when possible
            eq_close = eq_close.dropna().tail(n)
            fi_close = fi_close.dropna().tail(n)
            if len(eq_close) < n or len(fi_close) < n:
                try:
                    if getattr(self, 'PRINT_REGIME_SUMMARY', False):
                        self.Log(f"[Regime] Fallback: insufficient completed bars eq={len(eq_close)} fi={len(fi_close)} (need {n}) at {self.Time:%Y-%m-%d %H:%M}")
                except Exception:
                    pass
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            try:
                eq_last = float(eq_close.iloc[-1])
                fi_last = float(fi_close.iloc[-1])
                r_last = (eq_last / fi_last) if fi_last != 0 else float('nan')
            except Exception:
                r_last = float('nan')
            try:
                # Align indices once before computing the ratio so dates match
                eq_al, fi_al = eq_close.align(fi_close, join='inner')
                ratio = eq_al / fi_al
                sma_last = float(ratio.dropna().tail(n).mean())
            except Exception:
                sma_last = r_last
            regime = 'Risk-on' if r_last >= sma_last else 'Risk-off'
            # Optional: also compute an intraday ratio using current prices for visibility
            try:
                if getattr(self, 'PRINT_REGIME_SUMMARY', False):
                    eq_px = None
                    fi_px = None
                    try:
                        if eq in self.Securities and self.Securities[eq].Price and self.Securities[eq].Price > 0:
                            eq_px = float(self.Securities[eq].Price)
                        if fi in self.Securities and self.Securities[fi].Price and self.Securities[fi].Price > 0:
                            fi_px = float(self.Securities[fi].Price)
                    except Exception:
                        pass
                    intraday_ratio = (eq_px / fi_px) if (eq_px and fi_px and fi_px != 0) else float('nan')
                    self.Log(f"[RegimeDecision] as-of prev close: ratio={r_last:.6f}, sma({n})={sma_last:.6f} -> {regime}; intraday ratio={intraday_ratio if intraday_ratio==intraday_ratio else float('nan')}")
            except Exception:
                pass
            regime_eq = float(eq_close.iloc[-1])
            regime_fi = float(fi_close.iloc[-1])
            return regime, r_last, sma_last, regime_eq, regime_fi
        except Exception:
            return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
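    # Regime rule in brief: take the last n completed daily closes of the EQ
    # and FI proxies, form the ratio r = EQ/FI, and compare the latest value
    # to its n-bar SMA -- Risk-on when r_last >= sma_last, else Risk-off.
    # E.g. with n = 62, r_last = 1.52 and sma_last = 1.48 => Risk-on.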

    # --- End-of-run summary ---
    def OnEndOfAlgorithm(self):
        try:
            if getattr(self, 'sub_port_metrics', False):
                # Per-sub metrics (CAGR, MaxDD, Alpha/Beta)
                try:
                    _helpers_print_sub_portfolio_metrics(self)
                except Exception:
                    pass
            # Downside Capture summary (strategy vs benchmark) - independent flag or metrics
            try:
                if bool(getattr(self, 'PRINT_DOWNSIDE_CAPTURE_SUMMARY', False)) or getattr(self, 'sub_port_metrics', False):
                    ratio = getattr(self, 'downside_capture_ratio', float('nan'))
                    days = int(getattr(self, 'downside_capture_days', 0) or 0)
                    bench = getattr(self, 'BENCHMARK_SYMBOL', None)
                    bench_label = getattr(bench, 'Value', str(bench)) if bench is not None else 'Benchmark'
                    if ratio == ratio and days > 0:  # ratio == ratio is False only for NaN
                        self.Log(f"Downside Capture ({bench_label}): {100.0 * float(ratio):.2f}% over {days} benchmark down days")
                    else:
                        self.Log(f"Downside Capture ({bench_label}): N/A (no benchmark down days)")
            except Exception:
                pass
            # End-of-run funding/margin summary
            try:
                if bool(getattr(self, 'PRINT_FUNDING_SUMMARY', True)):
                    long_margin_total = float(getattr(self, 'cumulative_long_margin_cost', 0.0) or 0.0)
                    short_rebate_total = float(getattr(self, 'cumulative_short_rebate', 0.0) or 0.0)
                    stock_borrow_fee_total = float(getattr(self, 'cumulative_stock_borrow_fee', 0.0) or 0.0)
                    short_margin_interest_total = float(getattr(self, 'cumulative_short_margin_interest_cost', 0.0) or 0.0)
                    net_total_cost = (long_margin_total + stock_borrow_fee_total + short_margin_interest_total) - short_rebate_total
                    # Settings summary
                    lm_bps = int(getattr(self, 'LONG_MARGIN_SPREAD_BPS', 0))
                    sb_bps = int(getattr(self, 'SHORT_BORROW_SPREAD_BPS', 0))
                    stkb_bps = int(getattr(self, 'STOCK_BORROW_FEE_BPS', 0))
                    self.Log("=== Funding Summary (EndOfAlgorithm) ===")
                    self.Log(f"Settings: LONG_MARGIN_SPREAD_BPS={lm_bps}, SHORT_BORROW_SPREAD_BPS={sb_bps}, STOCK_BORROW_FEE_BPS={stkb_bps}, ShortNetting=Always")
                    self.Log(f"Totals: Long Margin=${long_margin_total:.2f}, Stock Borrow Fee=${stock_borrow_fee_total:.2f}, Short Rebate=${short_rebate_total:.2f}, Short Margin Interest=${short_margin_interest_total:.2f}")
                    self.Log(f"Net Funding Cost=${net_total_cost:.2f}")
            except Exception:
                pass
            # No passive regime optimization summary; use QC Optimization over SMA_PERIOD

            # End-of-run positions timeline printing moved to helper
            try:
                _helpers_perf_on_end_of_algorithm(self)
            except Exception:
                pass
        except Exception:
            pass
try:
    from AlgorithmImports import *
except ImportError:
    class BrokerageName: InteractiveBrokersBrokerage = 0
    class Resolution: Daily = 0; Minute = 1
    class Chart:
        def __init__(self, name): self.name = name; self.series = []
        def AddSeries(self, series): self.series.append(series)
    class Series:
        def __init__(self, name, series_type, index): self.name = name; self.series_type = series_type; self.index = index
    class SeriesType:
        Line = 0; Scatter = 1; Bar = 2; Candle = 3
    class QCAlgorithm: pass
    class OrderStatus:
        Filled = 0; PartiallyFilled = 1; Canceled = 2; None_ = 3; Invalid = 4
        Submitted = 5; New = 6; Accepted = 7; Held = 8; Rejected = 9
        PendingCancel = 10; PendingReplace = 11; PendingSubmit = 12; Stopped = 13
        Suspended = 14; Calculated = 15; Expired = 16; Pending = 17; Unknown = 18
    class Slice: pass
    class OrderDirection:
        Buy = 0
        Sell = 1
    class OrderEvent: pass
import math
import numpy as np
from datetime import timedelta
from helpers.analytics import print_sub_portfolio_metrics, print_sub_portfolio_audit, print_positions, update_sub_portfolio_navs, print_monthly_metrics
from helpers.reporting import log_message, debug_message, log_commission, log_metrics_report, log_period_metrics, log_audit, log_sub_portfolio, log_universe, log_cash_filter, log_ls_mode, log_normalization, log_sub_ports, log_sub_ports_range, log_costs
from helpers.metrics_utils import calculate_cagr, calculate_max_drawdown, annualized_stdev, tracking_error, sharpe_ratio, sortino_ratio, alpha_beta, information_ratio, up_capture, down_capture, fmt
from helpers.performance import on_order_event
from helpers.costs import apply_margin_and_borrow_costs
from helpers.data_sources import prewarm_minute_data
from helpers.rebalance import rebalance_moc
from helpers.config import configure_logging_flags, configure_universe
class MainAlgorithm(QCAlgorithm):
    def _can_trade_now(self):
        return not getattr(self, 'IsWarmingUp', False)

    def Initialize(self):
        # Set LongLeverage from QC Parameter if present (for all later logic)
        _ll_param = self.GetParameter("LongLeverage")
        if _ll_param is not None and str(_ll_param).strip() != "":
            try:
                self.LongLeverage = float(str(_ll_param).strip())
            except Exception:
                self.LongLeverage = 1.30
        else:
            self.LongLeverage = 1.30
        # Always set self.long_leverage to match at the start
        self.long_leverage = self.LongLeverage

        # --- CLASSIC HEADROOM APPROACH ---
        # Set all securities to leverage 4.0 for margin headroom
        self.master_leverage = 4.0
        self.SetSecurityInitializer(lambda security: security.SetLeverage(self.master_leverage))
        try:
            for sec in list(getattr(self, 'Securities', {}).Values):
                try:
                    sec.SetLeverage(self.master_leverage)
                except Exception:
                    continue
        except Exception:
            pass
        # All position sizing and order logic should use self.long_leverage for actual exposure, not self.master_leverage
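        # Sizing sketch under the defaults above (illustrative numbers): with
        # master_leverage=4.0 and long_leverage=1.30, a $100k portfolio targets
        # ~$130k of gross long exposure while margin checks permit up to $400k,
        # leaving buying-power headroom for the month-end MOC rebalance.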

        # Metrics debug logs parameter
        param = self.GetParameter("METRICS_DEBUG_LOGS")
        self.METRICS_DEBUG_LOGS = str(param).strip().lower() in ("1", "true", "yes", "y", "on") if param is not None else False

        self.Log("[QC LOG TEST] Initialize called. If you see this, logging works.")

        from helpers.fred import Fred
        self.SetStartDate(2015, 10, 31)
        self.SetEndDate(2025, 12, 5)
        self.initialcash = 100000
        self.SetCash(self.initialcash)
        self.SetTimeZone("America/New_York")
        param = self.GetParameter("LOG_DAILY_COSTS")
        self.LOG_DAILY_COSTS = str(param).strip().lower() in ("1", "true", "yes", "y", "on") if param is not None else False

        self.fred_3month_log = False
        self.fred_3month = self.AddData(Fred, "DTB3", Resolution.Daily).Symbol
        self.USE_MINUTE_DATA = False
        self.ENABLE_DAILY_RANKING_SNAPSHOT = False
        self._t60_window_active = False
        self._pending_minute_cleanup = False
        self._minute_subscribed = set()
        self.R2 = True
        self.STOCK_BORROW_FEE_BPS = 50
        self.LONG_MARGIN_SPREAD_BPS = 75
        self.SHORT_BORROW_SPREAD_BPS = 50
        self.cumulative_long_margin_cost = 0.0
        self.ALLOW_TRADES_DURING_WARMUP = False
        self.APPLY_COSTS = True
        _lm = self.GetParameter("LONG_MARGIN_SPREAD_BPS")
        if _lm is not None and str(_lm).strip() != "":
            self.LONG_MARGIN_SPREAD_BPS = int(float(str(_lm).strip()))
        _atdw = self.GetParameter("ALLOW_TRADES_DURING_WARMUP")
        if _atdw is not None and str(_atdw).strip() != "":
            s = str(_atdw).strip().lower()
            self.ALLOW_TRADES_DURING_WARMUP = s in ("1", "true", "yes", "y", "on")
        _sb = self.GetParameter("SHORT_BORROW_SPREAD_BPS")
        if _sb is not None and str(_sb).strip() != "":
            self.SHORT_BORROW_SPREAD_BPS = int(float(str(_sb).strip()))
        _sf = self.GetParameter("STOCK_BORROW_FEE_BPS")
        if _sf is not None and str(_sf).strip() != "":
            self.STOCK_BORROW_FEE_BPS = int(float(str(_sf).strip()))
        log_costs(self, int(getattr(self,'LONG_MARGIN_SPREAD_BPS',0)), int(getattr(self,'SHORT_BORROW_SPREAD_BPS',0)), int(getattr(self,'STOCK_BORROW_FEE_BPS',0)))

        self._last_eod_date = None

        _p = (self.GetParameter("LOG_PROFILE") or "summary").strip().lower()
        log_profile = _p if _p in ("summary", "detailed", "audit") else "summary"
        configure_logging_flags(self, profile=log_profile)

        self.fred_3month = self.AddData(Fred, "DTB3", Resolution.Daily).Symbol

        def _get_univ_param():
            for _k in ("UNIVERSE", "Universe", "universe"):
                try:
                    _v = self.GetParameter(_k)
                    if _v is not None and str(_v).strip() != "":
                        return _v
                except Exception:
                    continue
            return None
        _u = _get_univ_param()
        univ = (str(_u).strip().upper() if _u is not None and str(_u).strip() != "" else "EQUITY")
        if univ not in ("EQUITY", "BONDS"):
            univ = "EQUITY"
        configure_universe(self, universe_type=univ, use_minute=self.USE_MINUTE_DATA)
        log_universe(self, univ)
        lm = float(getattr(self, 'LONG_MOMENTUM_MULTIPLIER', 0.0) or 0.0)
        sm = float(getattr(self, 'SHORT_MOMENTUM_MULTIPLIER', 0.0) or 0.0)
        vm = float(getattr(self, 'VOL_MULTIPLIER', 0.0) or 0.0)
        log_message(self, f"[Factors] Active: LONG_MOMENTUM_MULTIPLIER={lm:.2f}, SHORT_MOMENTUM_MULTIPLIER={sm:.2f}, VOL_MULTIPLIER={vm:.2f}")
        raw_cft = self.GetParameter("CASH_FILTER_TYPES")
        if raw_cft is not None and str(raw_cft).strip() != "":
            s = str(raw_cft).strip().lower()
            if s.startswith("[") and s.endswith("]"):
                s = s[1:-1]
            parts = [p.strip() for p in (s.replace(" ", ",").split(",")) if p.strip() != ""]
            allowed = {"industry", "sector"}
            parsed = [p for p in parts if p in allowed]
            if str(raw_cft).strip() in ("[]", "[ ]"):
                parsed = []
            self.CASH_FILTER_TYPES = list(parsed)
        raw_sid = self.GetParameter("CASH_FILTER_SID")
        if raw_sid is not None and str(raw_sid).strip() != "":
            ticker = str(raw_sid).strip().upper()
            try:
                sec = self.AddEquity(ticker, Resolution.Daily)
                self.CASH_FILTERING_SID = sec.Symbol
            except Exception:
                try:
                    self.CASH_FILTERING_SID = self.Symbol(ticker)
                except Exception:
                    pass
        cft = list(getattr(self, 'CASH_FILTER_TYPES', []))
        sid = getattr(self, 'CASH_FILTERING_SID', None)
        log_cash_filter(self, cft, sid)
        if getattr(self, 'FIXED_UNIVERSE_TYPE', 'EQUITY') == 'BONDS':
            self.long_leverage = 1.00
            _bl = self.GetParameter("BondLeverage")
            if _bl is not None and str(_bl).strip() != "":
                try:
                    self.long_leverage = max(0.0, float(str(_bl).strip()))
                except Exception:
                    pass
        try:
            self.exchange = self.Securities[self.jpm.Symbol].Exchange
        except Exception:
            self.exchange = None
        if not hasattr(self, 'regime'):
            self.regime = 'Risk-on'

        self.LONG_PERIOD = 124
        self.SHORT_PERIOD = self.LONG_PERIOD // 2
        self.VOL_PERIOD = self.LONG_PERIOD // 2
        self.SMA_PERIOD = self.LONG_PERIOD // 2
        _sma = self.GetParameter("SMA_PERIOD")
        if _sma is not None and str(_sma).strip() != "":
            v = int(float(str(_sma).strip()))
            if v > 1:
                self.SMA_PERIOD = v
        # ...removed orphaned except/pass...
        self.SUB_PORT_COUNT_EACH_SIDE = 10
        self.SUB_PORT_GAP = 2
        count_each_side = int(self.SUB_PORT_COUNT_EACH_SIDE)
        gap = int(self.SUB_PORT_GAP)
        _range_raw = self.GetParameter("SUB_PORT_RANGE")
        if _range_raw is not None and str(_range_raw).strip() != "":
            s = str(_range_raw).strip().lower().replace(" ", "")
            parts = s.split("/") if "/" in s else (s.split(",") if "," in s else [s])
            if len(parts) >= 1 and parts[0] != "":
                count_each_side = max(0, int(float(parts[0])))
            if len(parts) >= 2 and parts[1] != "":
                gap = max(0, int(float(parts[1])))
        # ...removed orphaned except/pass...
        step = 1 if gap <= 0 else gap
        periods = []
        for k in range(-count_each_side, count_each_side + 1):
            p = int(self.LONG_PERIOD + k * step)
            if p > 0:
                periods.append(p)
        if periods:
            self.LONG_PERIODS = sorted(set(periods))
        else:
            self.LONG_PERIODS = [int(self.LONG_PERIOD)]
            try:
                log_sub_portfolio(self, f"[Fallback] Empty/invalid SUB_PORT_RANGE (count_each_side={count_each_side}, step={step}); using minimal fallback: [{int(self.LONG_PERIOD)}]")
            except Exception:
                pass
        log_sub_ports(self, self.LONG_PERIOD, count_each_side, step, len(self.LONG_PERIODS))
        if getattr(self, 'LONG_PERIODS', None):
            lp = list(self.LONG_PERIODS)
            log_sub_ports_range(self, min(lp), max(lp), self.LONG_PERIOD, step)
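        # Example with the defaults (LONG_PERIOD=124, count_each_side=10, gap=2):
        # periods run 104, 106, ..., 144 -- 21 sub-portfolios centered on 124.
        # A parameter such as SUB_PORT_RANGE="10/2" encodes the same count/gap pair.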
        self.sub_portfolios = {}
        for period in self.LONG_PERIODS:
            self.sub_portfolios[period] = {
                "LONG_PERIOD": period,
                "SHORT_PERIOD": period // 2,
                "VOL_PERIOD": period // 2,
                "ranking": None,
                "topranked": [],
                "bottomranked": [],
            }
        n_sp = len(self.LONG_PERIODS) if self.LONG_PERIODS else 1
        initial_sub_value = self.initialcash / n_sp
        self.sub_portfolio_equity_history = {period: [] for period in self.LONG_PERIODS}
        self.sub_portfolio_nav = {period: float(initial_sub_value) for period in self.LONG_PERIODS}
        self.sub_portfolio_nav_history = {period: [float(initial_sub_value)] for period in self.LONG_PERIODS}
        self.sub_portfolio_nav_timeline = {period: [self.StartDate] for period in self.LONG_PERIODS}
        self.overview_nav = float(sum(self.sub_portfolio_nav.values())) if self.sub_portfolio_nav else float(self.initialcash)
        self.overview_nav_history = [float(self.overview_nav)]
        self.overview_nav_timeline = [self.StartDate]
        self.sub_portfolio_weights = {period: {} for period in self.LONG_PERIODS}
        self._prev_close = {}
        self.downside_capture_num = 0.0
        self.downside_capture_den = 0.0
        self.downside_capture_days = 0
        self.downside_capture_ratio = float('nan')
        self._bench_prev_close = None
        self._eom_liquidated = []
        self._last_eom_month = None
        self._ledger_opens = []
        self._ledger_closes = []
        self._ledger_eom_positions = []
        self.SUB_CHART_SERIES_SAFE_LIMIT = 7
        existing_limit = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
        _lim_raw = self.GetParameter("SUB_CHART_SERIES_LIMIT")
        if _lim_raw is not None and str(_lim_raw).strip() != "":
            _lim_val = int(str(_lim_raw).strip())
            if _lim_val <= 0:
                _lim_val = existing_limit or 8
            self.SUB_CHART_SERIES_LIMIT = max(1, min(_lim_val, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
        else:
            self.SUB_CHART_SERIES_LIMIT = max(1, min(existing_limit or 8, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
        existing_single = bool(getattr(self, 'SUB_CHART_SINGLE_PANEL', False))
        _single_raw = self.GetParameter("SUB_CHART_SINGLE_PANEL")
        if _single_raw is not None and str(_single_raw).strip() != "":
            _single = str(_single_raw).strip().lower()
            self.SUB_CHART_SINGLE_PANEL = _single in ("1", "true", "yes", "y", "on", "all")
        else:
            self.SUB_CHART_SINGLE_PANEL = existing_single



        try:

            n_sp = len(self.LONG_PERIODS) if self.LONG_PERIODS else 0
            series_limit_raw = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
            series_limit = max(1, min(series_limit_raw, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
            periods_sorted = sorted(self.LONG_PERIODS)
            if getattr(self, 'SUB_CHART_SINGLE_PANEL', False):
                for base in ("SubNAV", "SubDrawdown"):
                    try:
                        ch = Chart(base)
                        for p in periods_sorted:
                            ch.AddSeries(Series(f"SP-{p}", SeriesType.Line, 0))
                        self.AddChart(ch)
                    except Exception:
                        pass
            else:
                windows = max(1, int((n_sp + series_limit - 1) // series_limit))
                for i in range(windows):
                    suffix = "" if i == 0 else f"-{i+1}"
                    start = i * series_limit
                    end = min(start + series_limit, n_sp)
                    window_periods = periods_sorted[start:end]
                    for base in ("SubNAV", "SubDrawdown"):
                        try:
                            ch = Chart(f"{base}{suffix}")
                            for p in window_periods:
                                ch.AddSeries(Series(f"SP-{p}", SeriesType.Line, 0))
                            self.AddChart(ch)
                        except Exception:
                            pass
        except Exception:
            pass
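        # Windowing example: 21 sub-portfolios with a series limit of 7 yield three
        # panels per metric (SubNAV, SubNAV-2, SubNAV-3), each holding 7 SP-<period> series.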

        def _parse_bool(val):
            try:
                return str(val).strip().lower() in ("1", "true", "yes", "y", "on")
            except Exception:
                return False
        try:
            _dbm = self.GetParameter("DIAG_BOUNDARY_META")
            if _dbm is not None and str(_dbm).strip() != "":
                self.DIAG_BOUNDARY_META = _parse_bool(_dbm)
        except Exception:
            pass
        try:
            _drb = self.GetParameter("DIAG_RANKING_BOUNDARY")
            if _drb is not None and str(_drb).strip() != "":
                self.DIAG_RANKING_BOUNDARY = _parse_bool(_drb)
        except Exception:
            pass
        try:
            _kb = self.GetParameter("RANKING_BOUNDARY_K")
            if _kb is not None and str(_kb).strip() != "":
                self.RANKING_BOUNDARY_K = int(str(_kb).strip())
        except Exception:
            pass
        try:
            _dsd = self.GetParameter("DIAG_SERIES_DUMP")
            _dsym = None
            try:
                _dsym_raw = self.GetParameter("DIAG_SERIES_DUMP_SYMBOL")
                if _dsym_raw is not None and str(_dsym_raw).strip() != "":
                    _dsym = str(_dsym_raw).strip().upper()
            except Exception:
                _dsym = None
            if _dsd is not None and str(_dsd).strip() != "":
                s = str(_dsd).strip()
                if _parse_bool(s):
                    self.DIAG_SERIES_DUMP = True
                else:
                    self.DIAG_SERIES_DUMP = True
                    self.DIAG_SERIES_DUMP_SYMBOL = s.upper()
            if _dsym is not None and _dsym != "":
                self.DIAG_SERIES_DUMP = True
                self.DIAG_SERIES_DUMP_SYMBOL = _dsym
        except Exception:
            pass

        try:
            sma_p = int(getattr(self, 'SMA_PERIOD', 50))
            self.regime_mode_msg = f"[Regime] Active: period SMA over {sma_p} bars"
            self._regime_mode_logged = False
            def _log_regime_mode():
                if getattr(self, '_regime_mode_logged', False): return
                try: self.Log(self.regime_mode_msg)
                except Exception: pass
                try:
                    mode = getattr(self.Settings, 'DataNormalizationMode', None)
                    mode_name = getattr(mode, 'name', None) or str(mode)
                    self.Log(f"[Normalization] Active: {mode_name}")
                except Exception: pass
                self._regime_mode_logged = True
            self.Schedule.On(
                self.DateRules.EveryDay(self.spy),
                self.TimeRules.AfterMarketOpen(self.spy, 1),
                _log_regime_mode
            )
        except Exception:
            pass

        try:
            plotted_count = 0
            series_limit_raw = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
            series_limit = max(1, min(series_limit_raw, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
            for period in sorted(self.LONG_PERIODS):
                nav_val = float(self.sub_portfolio_nav.get(period, initial_sub_value))
                if getattr(self, 'SUB_CHART_SINGLE_PANEL', False):
                    suffix = ""
                else:
                    chart_index = plotted_count // series_limit
                    suffix = "" if chart_index == 0 else f"-{chart_index+1}"
                if nav_val > 0:
                    self.Plot(f"SubNAV{suffix}", f"SP-{period}", math.log(nav_val))
                self.Plot(f"SubDrawdown{suffix}", f"SP-{period}", 0.0)
                plotted_count += 1
        except Exception:
            pass

        self.INDUSTRY_TOPN = 10
        self.SECTOR_TOPN = 10
        self.INDUSTRY_BOTN = self.INDUSTRY_TOPN // 2
        self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.00
        self.HEDGE_MULTIPLIER_INDUSTRY_RISK_OFF = 0.00
        if not hasattr(self, 'LongShortMode'):
            self.LongShortMode = "off"
        try:
            ls_mode = str(getattr(self, 'LongShortMode', 'off')).strip().lower()
            long_lev_default = float(getattr(self, 'LongLeverage', 1.30))
            short_pct_default = float(getattr(self, 'ShortMassPercent', 30.0))
            short_count_default = getattr(self, 'ShortCount', None)
            _lsm = self.GetParameter("LongShortMode")
            if _lsm is not None and str(_lsm).strip() != "":
                ls_mode = str(_lsm).strip().lower()
            _ll = self.GetParameter("LongLeverage")
            if _ll is not None and str(_ll).strip() != "":
                try:
                    long_lev_default = float(str(_ll).strip())
                except Exception:
                    pass
            _sp = self.GetParameter("ShortMassPercent")
            if _sp is not None and str(_sp).strip() != "":
                try:
                    short_pct_default = float(str(_sp).strip())
                except Exception:
                    pass
            _sc = self.GetParameter("ShortCount")
            if _sc is not None and str(_sc).strip() != "":
                try:
                    short_count_default = int(str(_sc).strip())
                except Exception:
                    pass
            if ls_mode in ("130-30", "130/30", "ls", "longshort", "l/s"):
                ls_mode = "130-30"
            elif ls_mode in ("100-30", "100/30"):
                ls_mode = "100-30"
            elif ls_mode in ("none", "off", "long-only", "longonly"):
                ls_mode = "off"
            else:
                try:
                    sep = "-" if "-" in ls_mode else ("/" if "/" in ls_mode else None)
                    if sep is not None:
                        parts = [p.strip() for p in ls_mode.split(sep) if p is not None]
                        if len(parts) == 2 and parts[0].replace(".", "", 1).isdigit() and parts[1].replace(".", "", 1).isdigit():
                            l_val = float(parts[0]) / 100.0
                            s_val = float(parts[1]) / 100.0
                            if l_val > 0.0 and s_val >= 0.0:
                                ls_mode = "custom"
                                long_lev_default = l_val
                                short_pct_default = s_val * 100.0
                except Exception:
                    pass
            if ls_mode == "130-30":
                self.long_leverage = 1.30
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.30
            elif ls_mode == "100-30":
                self.long_leverage = 1.00
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.30
            elif ls_mode == "custom":
                self.long_leverage = max(0.0, float(long_lev_default))
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = max(0.0, float(short_pct_default) / 100.0)
            else:
                # Long-only: do not overwrite self.long_leverage, use value set at top
                self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.00
            if short_count_default is not None:
                try:
                    self.INDUSTRY_BOTN = max(0, int(short_count_default))
                except Exception:
                    self.INDUSTRY_BOTN = self.INDUSTRY_TOPN // 2
            else:
                self.INDUSTRY_BOTN = self.INDUSTRY_TOPN // 2
            self.HEDGE_MULTIPLIER_INDUSTRY_RISK_OFF = 0.00
            try:
                ls_label = ls_mode
                if ls_mode == "custom":
                    ls_label = f"custom (L={self.long_leverage:.2f}, S={self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON:.2f}, botn={self.INDUSTRY_BOTN})"
                elif ls_mode == "130-30":
                    ls_label = f"130/30 (botn={self.INDUSTRY_BOTN})"
                elif ls_mode == "100-30":
                    ls_label = f"100/30 (botn={self.INDUSTRY_BOTN})"
                else:
                    ls_label = f"off (long-only @ L={self.long_leverage:.2f})"
                log_ls_mode(self, ls_label)
                if ls_mode == "off":
                    try:
                        log_message(self, f"Long-only at {self.long_leverage:.2f}x")
                        self.SetRuntimeStatistic("Long Exposure", f"{self.long_leverage:.2f}x")
                    except Exception:
                        pass
            except Exception:
                pass
        except Exception:
            self.HEDGE_MULTIPLIER_INDUSTRY_RISK_ON = 0.00
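        # Parsing examples: "130-30" and "130/30" normalize to the 130/30 preset
        # (long 1.30x, short hedge 0.30); a custom value such as "120/40" (illustrative)
        # yields long_leverage=1.20 and a 0.40 short hedge multiplier.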

        # Intentionally no second SetSecurityInitializer here; keep the global 4x leverage initializer from above.

        try:
            utype = getattr(self, 'FIXED_UNIVERSE_TYPE', 'EQUITY')
            if utype == 'EQUITY':
                has_shorts = float(getattr(self, 'HEDGE_MULTIPLIER_INDUSTRY_RISK_ON', 0.0)) > 0.0
                if has_shorts and hasattr(self, 'csm'):
                    self.SetBenchmark(self.csm)
                    self.BENCHMARK_SYMBOL = self.csm
                elif hasattr(self, 'iwv'):
                    self.SetBenchmark(self.iwv)
                    self.BENCHMARK_SYMBOL = self.iwv
        except Exception:
            pass
        try:
            bm = getattr(self, 'BENCHMARK_SYMBOL', None)
            bm_str = getattr(bm, 'Value', str(bm)) if bm is not None else 'None'
            log_message(self, f"[Benchmark] Active: {bm_str}")
        except Exception:
            pass
        self.CASH_PERCENT = 0.25
        self.PERCENTAGE_MOMENTUM = True
        self.VOL_INDUSTRY = 'R2'
        self.VOL_SECTOR = 'R2'
        self.RANKING_METRIC = "R2"

        self.SetWarmUp(max(self.LONG_PERIODS) * 2, Resolution.Daily)

        self.Schedule.On(
            self.DateRules.MonthEnd(self.spy),
            self.TimeRules.BeforeMarketClose(self.spy, 60),
            self.prewarm_minute_data
        )
        self.Schedule.On(
            self.DateRules.MonthEnd(self.spy),
            self.TimeRules.BeforeMarketClose(self.spy, 30),
            self.rebalance_moc
        )
        self.Schedule.On(
            self.DateRules.MonthEnd(self.spy),
            self.TimeRules.BeforeMarketClose(self.spy, 0),
            self.print_monthly_metrics
        )
        self.Schedule.On(
            self.DateRules.EveryDay(self.spy),
            self.TimeRules.AfterMarketOpen(self.spy, 5),
            self._maybe_cleanup_minute_subs
        )

        should_print_positions = (
            getattr(self, 'flagCommentRank', 'None') == 1 or
            getattr(self, 'POSITION_REPORTING_FLAG', False) or
            getattr(self, 'abbreviated_positions_print', False) or
            getattr(self, 'sub_port_positions', False)
        )
        if should_print_positions:
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                lambda: print_positions(self)
            )
        if getattr(self, 'sub_port_audit', False):
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                self.print_sub_portfolio_audit
            )
        if getattr(self, 'flagCommentRank', 'None') == 0:
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                lambda: self.Log(f"daily equity: {self.Portfolio.TotalPortfolioValue}")
            )
        if getattr(self, 'PRINT_CLOSED_POSITIONS', False):
            self.Schedule.On(
                self.DateRules.MonthEnd(self.spy),
                self.TimeRules.BeforeMarketClose(self.spy, 0),
                self.print_closed_positions
            )

    def OnData(self, data):
        try:
            if hasattr(self, 'fred_3month') and self.fred_3month in data and hasattr(data[self.fred_3month], 'Value'):
                raw_val = data[self.fred_3month].Value
                if self.fred_3month_log:
                    self.Log(f"FRED.3Month raw value: {raw_val}")
                if raw_val is not None:
                    val = float(raw_val)
                    if val > 0:
                        self.fred_3month_rate = val
                        if self.fred_3month_log:
                            self.Log(f"FRED.3Month rate updated: {self.fred_3month_rate:.4f}")
                    else:
                        if self.fred_3month_log:
                            self.Log(f"FRED.3Month rate not updated (invalid or non-positive): {val}")
        except Exception as e:
            if self.fred_3month_log:
                self.Log(f"Error updating FRED.3Month rate: {e}")

    def _on_end_of_day_core(self):
        try:
            cur_date = None
            try:
                cur_date = self.Time.date()
            except Exception:
                cur_date = getattr(self, 'Time', None)
            if cur_date is not None and getattr(self, '_last_eod_date', None) == cur_date:
                return
            self._last_eod_date = cur_date
        except Exception:
            pass
        try:
            self._update_sub_portfolio_navs()
        except Exception:
            pass
        try:
            if getattr(self, 'APPLY_COSTS', True):
                apply_margin_and_borrow_costs(self)
        except Exception:
            pass

    def OnEndOfDay(self, symbol):
        # Gate all logging and data collection to only occur on or after StartDate
        if not hasattr(self, 'StartDate') or self.Time < self.StartDate:
            return
        try:
            if not hasattr(self, 'spy') or getattr(self, 'spy', None) is None:
                return
            if symbol != getattr(self, 'spy', None):
                return
            spy_sym = getattr(self, 'spy', None)
            sec = self.Securities.get(spy_sym) if hasattr(self, 'Securities') else None
            if not sec or getattr(sec, 'Price', 0) <= 0:
                return
        except Exception:
            return

        try:
            if not hasattr(self, 'daily_portfolio_returns'):
                self.daily_portfolio_returns = []
            if not hasattr(self, 'daily_benchmark_returns'):
                self.daily_benchmark_returns = []
            if not hasattr(self, 'daily_total_values'):
                self.daily_total_values = []

            try:
                nav = float(self.Portfolio.TotalPortfolioValue)
                prev_nav = getattr(self, '_prev_nav', None)
                if prev_nav is not None and prev_nav > 0:
                    port_ret = (nav - prev_nav) / prev_nav
                else:
                    port_ret = 0.0
                self._prev_nav = nav
            except Exception:
                nav = 0.0
                port_ret = 0.0

            try:
                bench_price = float(sec.Price)
                prev_bench = getattr(self, '_prev_bench', None)
                if prev_bench is not None and prev_bench > 0:
                    bench_ret = (bench_price - prev_bench) / prev_bench
                else:
                    bench_ret = 0.0
                self._prev_bench = bench_price
            except Exception:
                bench_price = 0.0
                bench_ret = 0.0


            self.daily_portfolio_returns.append(port_ret)
            self.daily_benchmark_returns.append(bench_ret)
            self.daily_total_values.append(nav)
            # Keep overview_nav_timeline in sync
            if not hasattr(self, 'overview_nav_timeline'):
                self.overview_nav_timeline = []
            self.overview_nav_timeline.append(self.Time)

        except Exception as e:
            self.Log(f"[metrics debug] OnEndOfDay exception: {e}")

    def prewarm_minute_data(self):
        try:
            prewarm_minute_data(self)
        except Exception as e:
            debug_message(self, f"[prewarm_minute_data] error: {e}")

    def rebalance_moc(self):
        if not self._can_trade_now():
            try:
                debug_message(self, "[Rebalance] Trading gated (warmup/suspended); skipping order submission")
            except Exception:
                pass
            return
        try:
            rebalance_moc(self)
        except Exception as e:
            debug_message(self, f"[rebalance_moc] error: {e}")

    def print_monthly_metrics(self):
        try: print_monthly_metrics(self)
        except Exception as e: debug_message(self, f"[print_monthly_metrics] error: {e}")

    def OnWarmupFinished(self):
        try: self.warmup_done = True; debug_message(self, "[Init] Warmup finished; trading unlocked.")
        except Exception: pass

    def _ensure_minute_for_trade_set(self, symbols):
        if not getattr(self, '_t60_window_active', False):
            return
        if not hasattr(self, '_minute_subscribed'):
            self._minute_subscribed = set()
        for s in symbols:
            try:
                if s not in self._minute_subscribed:
                    self.AddEquity(s.Value, Resolution.Minute)
                    self._minute_subscribed.add(s)
            except Exception as e:
                try:
                    debug_message(self, f"[minute-sub] Failed for {getattr(s,'Value',str(s))}: {e}")
                except Exception:
                    pass

    def _maybe_cleanup_minute_subs(self):
        if getattr(self, '_pending_minute_cleanup', False):
            self._minute_subscribed = set(); self._pending_minute_cleanup = False

    def print_positions(self):
        try: print_positions(self)
        except Exception as e: debug_message(self, f"[print_positions] error: {e}")
        try: update_sub_portfolio_navs(self)
        except Exception: pass
    def _update_sub_portfolio_navs(self):
        all_syms = set()
        for w in getattr(self, 'sub_portfolio_weights', {}).values():
            all_syms.update(w.keys())
        cur_close = {}
        for sym in list(all_syms):
            try:
                if sym in self.Securities and self.Securities[sym].Price and self.Securities[sym].Price > 0:
                    cur_close[sym] = float(self.Securities[sym].Price)
            except Exception:
                continue
        sym_returns = {}
        if getattr(self, '_prev_close', {}):
            for sym, px in cur_close.items():
                prev = self._prev_close.get(sym, None)
                if prev and prev > 0:
                    sym_returns[sym] = (px - prev) / prev
        plotted_count = 0
        series_limit_raw = int(getattr(self, 'SUB_CHART_SERIES_LIMIT', 8) or 8)
        series_limit = max(1, min(series_limit_raw, int(getattr(self, 'SUB_CHART_SERIES_SAFE_LIMIT', 7))))
        for period in sorted(getattr(self, 'sub_portfolio_weights', {}).keys()):
            weights = getattr(self, 'sub_portfolio_weights', {}).get(period, {})
            w_ret = 0.0
            for sym, w in weights.items():
                r = sym_returns.get(sym, None)
                if r is not None:
                    w_ret += float(w) * float(r)
            self.sub_portfolio_nav[period] = float(self.sub_portfolio_nav.get(period, 0.0)) * (1.0 + w_ret)
            nav_val = self.sub_portfolio_nav[period]
            self.sub_portfolio_nav_history[period].append(nav_val)
            if hasattr(self, 'sub_portfolio_nav_timeline'):
                self.sub_portfolio_nav_timeline[period].append(self.Time)
            try:
                hist = self.sub_portfolio_nav_history[period]
                if hist:
                    peak = max(hist)
                    dd_pct = ((peak - nav_val) / peak * 100.0) if peak > 0 else 0.0
                    if getattr(self, 'SUB_CHART_SINGLE_PANEL', False):
                        suffix = ""
                    else:
                        chart_index = plotted_count // series_limit
                        suffix = "" if chart_index == 0 else f"-{chart_index+1}"
                    if nav_val is not None and nav_val > 0:
                        log_nav = math.log(float(nav_val))
                        self.Plot(f"SubNAV{suffix}", f"SP-{period}", log_nav)
                    self.Plot(f"SubDrawdown{suffix}", f"SP-{period}", -dd_pct)
                    plotted_count += 1
            except Exception:
                pass
        try:
            total_nav = sum(self.sub_portfolio_nav.values()) if hasattr(self, 'sub_portfolio_nav') else 0.0
            self.overview_nav = float(total_nav)
            if hasattr(self, 'overview_nav_history'):
                self.overview_nav_history.append(self.overview_nav)
            if hasattr(self, 'overview_nav_timeline'):
                self.overview_nav_timeline.append(self.Time)
            if getattr(self, 'overview_nav_history', None):
                peak_total = max(self.overview_nav_history)
                total_dd_pct = ((peak_total - self.overview_nav) / peak_total * 100.0) if peak_total > 0 else 0.0
                if self.overview_nav > 0:
                    self.Plot("Overview", "TotalNAV (log)", math.log(float(self.overview_nav)))
                dd_frac_total = float(total_dd_pct) / 100.0 if total_dd_pct is not None else 0.0
                if dd_frac_total >= 0.0 and dd_frac_total < 1.0:
                    log_dd_total = -math.log(1.0 - dd_frac_total) if dd_frac_total > 0 else 0.0
                    self.Plot("Overview", "TotalDrawdown (logDD)", -100.0 * log_dd_total)
            try:
                bench = getattr(self, 'BENCHMARK_SYMBOL', None)
                bench_close = None
                if bench is not None and bench in self.Securities and self.Securities[bench].Price and self.Securities[bench].Price > 0:
                    bench_close = float(self.Securities[bench].Price)
                strat_ret = None
                if hasattr(self, 'overview_nav_history') and len(self.overview_nav_history) >= 2:
                    prev_nav = float(self.overview_nav_history[-2])
                    cur_nav = float(self.overview_nav_history[-1])
                    if prev_nav > 0:
                        strat_ret = (cur_nav / prev_nav) - 1.0
                bench_ret = None
                if bench_close is not None and getattr(self, '_bench_prev_close', None):
                    prev_bench = float(self._bench_prev_close)
                    if prev_bench > 0:
                        bench_ret = (bench_close / prev_bench) - 1.0
                if bench_ret is not None and bench_ret < 0 and strat_ret is not None:
                    self.downside_capture_num += float(strat_ret)
                    self.downside_capture_den += abs(float(bench_ret))
                    self.downside_capture_days += 1
                if self.downside_capture_den > 0:
                    self.downside_capture_ratio = self.downside_capture_num / self.downside_capture_den
                    self.Plot("Overview", "DownsideCapture (%)", 100.0 * float(self.downside_capture_ratio))
                if bench_close is not None:
                    self._bench_prev_close = bench_close
                try:
                    if strat_ret is not None:
                        self._last_day_strat_ret = float(strat_ret)
                    else:
                        self._last_day_strat_ret = None
                    if bench_ret is not None:
                        self._last_day_bench_ret = float(bench_ret)
                    else:
                        self._last_day_bench_ret = None
                except Exception:
                    pass
            except Exception:
                pass
        except Exception:
            pass
        if cur_close:
            self._prev_close = dict(cur_close)
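    # NAV bookkeeping sketch for the method above: for each period p,
    #   nav_t(p) = nav_{t-1}(p) * (1 + sum_i w_i(p) * r_i)
    # where r_i is the close-to-close return built from the cached _prev_close map,
    # and drawdown is measured against the running peak of that NAV history.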

    def print_sub_portfolio_metrics(self):
        try: print_sub_portfolio_metrics(self)
        except Exception as e: debug_message(self, f"[print_sub_portfolio_metrics] error: {e}")

    def print_sub_portfolio_audit(self):
        try: print_sub_portfolio_audit(self)
        except Exception as e: debug_message(self, f"[print_sub_portfolio_audit] error: {e}")

    def OnOrderEvent(self, orderEvent):
        try:
            from helpers.costs import calculate_commission

            if not hasattr(self, '_logged_order_ids') or getattr(self, '_logged_order_ids_date', None) != self.Time.date():
                self._logged_order_ids = set()
                self._logged_order_ids_date = self.Time.date()

            if getattr(self, 'LOG_DAILY_COSTS', False):
                if orderEvent.Status == OrderStatus.Filled and orderEvent.FillQuantity != 0:

                    event_date = getattr(orderEvent, 'UtcTime', getattr(orderEvent, 'Time', None))
                    if event_date is not None and event_date.date() == self.Time.date():
                        if orderEvent.OrderId not in self._logged_order_ids:
                            commission = calculate_commission(orderEvent.FillQuantity)
                            log_message(self, f"{event_date.date()} | OrderID: {orderEvent.OrderId}, Symbol: {orderEvent.Symbol}, Quantity: {orderEvent.FillQuantity}, Commission: {commission:.3f}")
                            self._logged_order_ids.add(orderEvent.OrderId)
        except Exception:
            pass
        try:
            if orderEvent.Status == OrderStatus.Invalid:
                order = self.Transactions.GetOrderById(orderEvent.OrderId)
                symbol = getattr(order, 'Symbol', None)
                shares = getattr(order, 'Quantity', None)
                initial_margin = None
                free_margin = None
                margin_deficit = None
                margin_deficit_pct = None
                msg = getattr(orderEvent, 'Message', None)
                if isinstance(msg, str) and "Initial Margin:" in msg and "Free Margin:" in msg:
                    try:
                        parts = msg.split(",")
                        for part in parts:
                            if "Initial Margin:" in part:
                                val = part.split("Initial Margin:")[-1].strip().rstrip('.')
                                # Remove any trailing non-numeric characters
                                val = ''.join(c for c in val if (c.isdigit() or c == '.' or c == '-'))
                                initial_margin = float(val)
                            if "Free Margin:" in part:
                                val = part.split("Free Margin:")[-1].strip().rstrip('.')
                                val = ''.join(c for c in val if (c.isdigit() or c == '.' or c == '-'))
                                free_margin = float(val)
                        if initial_margin is not None and free_margin is not None:
                            margin_deficit = initial_margin - free_margin
                            n = len(getattr(self, 'sub_portfolios', {})) or 1
                            pv = float(self.Portfolio.TotalPortfolioValue) * float(getattr(self, 'long_leverage', 1.0)) * (100.0 - float(getattr(self, 'CASH_PERCENT', 0.0))) / 100.0
                            sub_portfolio_value = pv / n if n > 0 else pv
                            margin_deficit_pct = (margin_deficit / sub_portfolio_value * 100.0) if sub_portfolio_value else None
                    except Exception:
                        pass
                log_message(self, f"Order Error: ids: [{orderEvent.OrderId}], Symbol: {symbol.Value if hasattr(symbol,'Value') else str(symbol)}, Shares: {shares}, Insufficient buying power to complete order (Value:[{orderEvent.FillPrice}]), Reason: {orderEvent.Message}")
                if margin_deficit is not None and margin_deficit_pct is not None:
                    log_message(self, f"Margin Deficit: ${margin_deficit:.2f} ({margin_deficit_pct:.2f}% of sub-portfolio value)")
                return
        except Exception:
            pass
        try:
            on_order_event(self, orderEvent)
        except Exception:
            pass

    def calculate_regime(self):
        try:
            eq = getattr(self, 'REGIME_EQ', None)
            fi = getattr(self, 'REGIME_FI', None)
            if eq is None or fi is None:
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            end_time = self.Time
            n = int(getattr(self, 'SMA_PERIOD', 50))
            lookback_days = max(1, n * 2)
            raw_hist_eq = self.History([eq], end_time - timedelta(days=lookback_days), end_time, Resolution.Daily)
            raw_hist_fi = self.History([fi], end_time - timedelta(days=lookback_days), end_time, Resolution.Daily)
            if raw_hist_eq is None or raw_hist_eq.empty or raw_hist_fi is None or raw_hist_fi.empty:
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            try:
                eq_close = raw_hist_eq.loc[eq]['close']
                fi_close = raw_hist_fi.loc[fi]['close']
            except Exception:
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            try:
                today = self.Time.date()
                eq_close = eq_close[eq_close.index.date < today]
                fi_close = fi_close[fi_close.index.date < today]
            except Exception:
                pass
            eq_close = eq_close.dropna().tail(n)
            fi_close = fi_close.dropna().tail(n)
            if len(eq_close) < n or len(fi_close) < n:
                try:
                    if getattr(self, 'PRINT_REGIME_SUMMARY', False):
                        self.Log(f"[Regime] Fallback: insufficient completed bars eq={len(eq_close)} fi={len(fi_close)} (need {n}) at {self.Time:%Y-%m-%d %H:%M}")
                except Exception:
                    pass
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            if len(eq_close) == 0 or len(fi_close) == 0:
                return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
            try:
                eq_last = float(eq_close.iloc[-1])
                fi_last = float(fi_close.iloc[-1])
                r_last = (eq_last / fi_last) if fi_last != 0 else float('nan')
            except Exception:
                r_last = float('nan')
            try:
                # Align indices once before computing the ratio so both series share dates
                eq_al, fi_al = eq_close.align(fi_close, join='inner')
                ratio = eq_al / fi_al
                sma_last = float(ratio.dropna().tail(n).mean())
            except Exception:
                sma_last = r_last
            regime = 'Risk-on' if r_last >= sma_last else 'Risk-off'
            try:
                if getattr(self, 'PRINT_REGIME_SUMMARY', False):
                    eq_px = None
                    fi_px = None
                    try:
                        if eq in self.Securities and self.Securities[eq].Price and self.Securities[eq].Price > 0:
                            eq_px = float(self.Securities[eq].Price)
                        if fi in self.Securities and self.Securities[fi].Price and self.Securities[fi].Price > 0:
                            fi_px = float(self.Securities[fi].Price)
                    except Exception:
                        pass
                    intraday_ratio = (eq_px / fi_px) if (eq_px and fi_px and fi_px != 0) else float('nan')
                    self.Log(f"[RegimeDecision] as-of prev close: ratio={r_last:.6f}, sma({n})={sma_last:.6f} -> {regime}; intraday ratio={intraday_ratio if intraday_ratio==intraday_ratio else float('nan')}")
            except Exception:
                pass
            regime_eq = float(eq_close.iloc[-1])
            regime_fi = float(fi_close.iloc[-1])
            return regime, r_last, sma_last, regime_eq, regime_fi
        except Exception:
            return 'Risk-on', float('nan'), float('nan'), float('nan'), float('nan')
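    # Regime rule sketch (mirrors calculate_regime above): with aligned daily closes,
    #   ratio = eq_close / fi_close
    #   Risk-on  if ratio.iloc[-1] >= ratio.tail(n).mean()   # n = SMA_PERIOD
    #   Risk-off otherwise; any data problem falls back to Risk-on with NaN diagnostics.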

    def OnEndOfAlgorithm(self):
        self.Log("[QC LOG TEST] OnEndOfAlgorithm called. If you see this, logging works.")
        self.Log("[QC LOG TEST] About to import print_final_metrics")
        try:
            from helpers.metrics_utils import print_final_metrics
            self.Log("[QC LOG TEST] Successfully imported print_final_metrics")
        except Exception as e:
            self.Log(f"[QC LOG TEST] Import failed: {e}")
            return
        # --- ALIGNMENT FIX: Ensure nav_timeline matches other arrays ---
        try:
            vals = getattr(self, 'daily_total_values', [])
            nav = getattr(self, 'overview_nav_timeline', [])
            if len(nav) > len(vals):
                self.Log(f"[metrics debug] Trimming nav_timeline from {len(nav)} to {len(vals)}")
                self.overview_nav_timeline = nav[:len(vals)]
            elif len(nav) < len(vals):
                self.Log(f"[metrics debug] Padding nav_timeline from {len(nav)} to {len(vals)}")
                if nav:
                    last_val = nav[-1]
                else:
                    last_val = self.Time
                self.overview_nav_timeline = nav + [last_val] * (len(vals) - len(nav))
        except Exception as e:
            self.Log(f"[metrics debug] nav_timeline alignment error: {e}")
        try:
            print_final_metrics(self)
        except Exception as e:
            self.Log(f"[QC LOG TEST] print_final_metrics call failed: {e}")




import pandas as pd


def write_metrics_data_tab(rets_df, writer, workbook):
    # --- Metrics Data Tab ---
    metrics_data_cols = [
        'Date', 'Portfolio Value', 'Portfolio Return', 'Portfolio Drawdown', 'Benchmark Value', 'Benchmark Return', 'Benchmark Drawdown'
    ]
    metrics_data = pd.DataFrame({
        'Date': rets_df.index,
        'Portfolio Value': rets_df['total_value'],
        'Portfolio Return': rets_df['net_return'],
        'Portfolio Drawdown': (rets_df['total_value'] / rets_df['total_value'].cummax() - 1),
        'Benchmark Value': rets_df['Benchmark'],
        'Benchmark Return': rets_df['Benchmark'].pct_change().fillna(0),
        'Benchmark Drawdown': (rets_df['Benchmark'] / rets_df['Benchmark'].cummax() - 1)
    })
    metrics_data.reset_index(drop=True, inplace=True)
    # Write daily metrics data
    metrics_data.to_excel(writer, sheet_name='Metrics Data', index=False)
    # Write summary metrics and Excel formulas for audit.
    # Use bounded data ranges (Excel rows 2..n+1; row 1 is the header) rather than
    # whole-column references: the CAGR formula lives in column B, so B:B would be circular.
    worksheet = writer.sheets['Metrics Data']
    n = len(metrics_data)
    data_first, data_last = 2, n + 1

    def rng(col):
        return f"{col}{data_first}:{col}{data_last}"

    last_row = n + 2  # 0-indexed xlsxwriter row, one blank row below the data
    worksheet.write(last_row, 0, 'Summary Metrics', workbook.add_format({'bold': True}))
    worksheet.write(last_row+1, 0, 'CAGR')
    worksheet.write(last_row+1, 1, f'=((B{data_last}/B{data_first})^(1/({n}-1))-1)')
    worksheet.write(last_row+2, 0, 'MaxDrawdown')
    worksheet.write(last_row+2, 1, f'=MIN({rng("D")})')  # column D = Portfolio Drawdown
    worksheet.write(last_row+3, 0, 'STDEV')
    worksheet.write(last_row+3, 1, f'=STDEV({rng("C")})')
    worksheet.write(last_row+4, 0, 'Benchmark CAGR')
    worksheet.write(last_row+4, 1, f'=((E{data_last}/E{data_first})^(1/({n}-1))-1)')
    worksheet.write(last_row+5, 0, 'Benchmark MaxDrawdown')
    worksheet.write(last_row+5, 1, f'=MIN({rng("G")})')  # column G = Benchmark Drawdown
    worksheet.write(last_row+6, 0, 'Benchmark STDEV')
    worksheet.write(last_row+6, 1, f'=STDEV({rng("F")})')
    worksheet.write(last_row+7, 0, 'Alpha')
    worksheet.write(last_row+7, 1, f'=INTERCEPT({rng("C")},{rng("F")})')
    worksheet.write(last_row+8, 0, 'Beta')
    worksheet.write(last_row+8, 1, f'=SLOPE({rng("C")},{rng("F")})')
    worksheet.write(last_row+9, 0, 'Tracking Error')
    worksheet.write(last_row+9, 1, f'=STDEV({rng("C")}-{rng("F")})')  # array formula in legacy Excel
    worksheet.write(last_row+10, 0, 'Sharpe')
    worksheet.write(last_row+10, 1, f'=AVERAGE({rng("C")})/STDEV({rng("C")})')
    worksheet.write(last_row+11, 0, 'Sortino')
    worksheet.write(last_row+11, 1, f'=AVERAGE({rng("C")})/STDEV(IF({rng("C")}<0,{rng("C")}))')  # array formula

# Only run top-level code if executed directly
if __name__ == "__main__":
    # Example usage (replace with actual arguments as needed)
    # write_metrics_data_tab(rets_df, writer, workbook)
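    # rets_df is assumed to be date-indexed with columns 'total_value' and
    # 'net_return' plus a 'Benchmark' level series, matching the tab writer above;
    # writer/workbook come from pd.ExcelWriter(..., engine='xlsxwriter') and writer.book.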
    pass
# --- Symbol name mapping for QC ---
def get_symbol_name_dict(qb, symbols):
    """Returns a dictionary mapping symbol value to its name/description using QC SymbolPropertiesDatabase."""
    symbol_names = {}
    for symbol in symbols:
        name = symbol.Value
        try:
            props = qb.SymbolPropertiesDatabase.GetSymbolProperties(symbol.ID.Market, symbol, symbol.SecurityType, symbol.ID.QuoteCurrency)
            name = props.Description
        except Exception:
            pass
        symbol_names[symbol.Value] = name
    return symbol_names
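# Usage sketch (QC Research; illustrative symbol):
#   qb = QuantBook()
#   syms = [qb.AddEquity("SPY").Symbol]
#   get_symbol_name_dict(qb, syms)  # maps "SPY" to its description, or the ticker on failure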
#!/usr/bin/env python3
# generate_mp_report.py
# Builds Excel and PDF performance reports from the latest CSV in ObjectStore (QC Research)
# or from a local CSV path when run locally.
# Requirements: pandas, numpy, matplotlib, xlsxwriter
# Install (local): pip install pandas numpy matplotlib xlsxwriter

import os
import re
import io
import sys
import gzip
from datetime import datetime
from typing import Optional, Tuple

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

# --- CSV Export for EOM Holdings ---
def _write_eom_csv(holdings: pd.DataFrame, output_dir: str) -> str:
    """Write EOM holdings as a CSV with each metric in its own column (one row per position)."""
    os.makedirs(output_dir, exist_ok=True)
    csv_out = os.path.join(output_dir, "MP_EOM_Holdings.csv")
    if holdings is not None and isinstance(holdings, pd.DataFrame) and not holdings.empty:
        # Ensure columns are in the desired order
        desired_cols = [
            'date', 'portfolio_id', 'symbol', 'weight', 'qty', 'price', 'mv', 'total_value'
        ]
        cols = [c for c in desired_cols if c in holdings.columns] + [c for c in holdings.columns if c not in desired_cols]
        holdings[cols].to_csv(csv_out, index=False)
    else:
        pd.DataFrame().to_csv(csv_out, index=False)
    return csv_out
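# Usage sketch (illustrative single-row frame; column names match desired_cols above):
#   df = pd.DataFrame([{'date': '2024-01-31', 'portfolio_id': 124, 'symbol': 'SPY',
#                       'weight': 0.05, 'qty': 10, 'price': 480.0, 'mv': 4800.0,
#                       'total_value': 96000.0}])
#   _write_eom_csv(df, DEFAULT_OUTPUT_DIR)  # -> "project/report_output/MP_EOM_Holdings.csv"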

# Fixed default output directory inside the QC Project tree.
DEFAULT_OUTPUT_DIR = "project/report_output"
DEFAULT_CHUNK_SIZE = 400


# --- ObjectStore helpers (QC Research) ---

def _get_qb():
    try:
        from QuantConnect.Research import QuantBook
        return QuantBook()
    except Exception:
        return None


def _read_objectstore_text(qb, key: str) -> Optional[str]:
    if not qb or not key:
        return None
    if str(key).endswith('.gz'):
        try:
            raw = qb.ObjectStore.ReadBytes(key)
            if raw is None:
                return None
            return gzip.decompress(raw).decode('utf-8', errors='replace')
        except Exception:
            return None
    try:
        s = qb.ObjectStore.Read(key)
        if isinstance(s, str) and s:
            return s
    except Exception:
        pass
    try:
        b = qb.ObjectStore.ReadBytes(key)
        if b is not None:
            return b.decode('utf-8', errors='replace')
    except Exception:
        pass
    return None


def _list_objectstore_keys(qb) -> list:
    keys = []
    if not qb:
        return keys
    os_obj = qb.ObjectStore
    try:
        if hasattr(os_obj, 'Keys'):
            for k in os_obj.Keys:
                keys.append(str(k))
    except Exception:
        pass
    try:
        if hasattr(os_obj, 'GetKeys'):
            for k in os_obj.GetKeys():
                keys.append(str(k))
    except Exception:
        pass
    try:
        if hasattr(os_obj, 'GetEnumerator'):
            en = os_obj.GetEnumerator()
            for kv in en:
                k = None
                for attr in ('Key', 'key'):
                    try:
                        k = getattr(kv, attr)
                        break
                    except Exception:
                        continue
                if k is None:
                    k = str(kv)
                if k:
                    keys.append(str(k))
    except Exception:
        pass
    seen = set()
    out = []
    for k in keys:
        if k not in seen:
            seen.add(k)
            out.append(k)
    return out
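# Note: three enumeration paths (Keys, GetKeys, GetEnumerator) are probed defensively,
# since the ObjectStore members exposed to Python can differ by Lean version; results
# are de-duplicated while preserving first-seen order.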


def _parse_perf_timestamp_from_key(key: str) -> Optional[datetime]:
    if not key:
        return None
    m = re.search(r'(?:perf/perf_|perf__)(\d{8}_\d{6})\.csv(?:\.gz)?$', str(key))
    if not m:
        return None
    ts = m.group(1)
    try:
        return datetime.strptime(ts, '%Y%m%d_%H%M%S')
    except Exception:
        return None


def _find_best_perf_key(qb) -> Optional[str]:
    for cand in ('perf/latest.csv', 'perf/latest.csv.gz'):
        try:
            if hasattr(qb.ObjectStore, 'ContainsKey') and qb.ObjectStore.ContainsKey(cand):
                return cand
            txt = _read_objectstore_text(qb, cand)
            if txt:
                return cand
        except Exception:
            continue
    keys = _list_objectstore_keys(qb)
    perf_keys = [k for k in keys if isinstance(k, str) and (k.startswith('perf/') or k.startswith('perf__')) and (k.endswith('.csv') or k.endswith('.csv.gz'))]
    if not perf_keys:
        return None
    def sort_key(k):
        dt = _parse_perf_timestamp_from_key(k)
        return (dt is not None, dt, k)
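    # sort_key tuples rank untimestamped keys (False) below timestamped ones (True);
    # among timestamped keys the newest datetime wins, with the key string as the
    # tie-break, so reverse=True picks the most recent timestamped export first.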
    return sorted(perf_keys, key=sort_key, reverse=True)[0]


# --- Core report logic ---

def _compute_metrics_for_window(df_window: pd.DataFrame) -> dict:
    print("\n--- _compute_metrics_for_window diagnostics ---")
    print("df_window type:", type(df_window))
    print("df_window length:", len(df_window))
    print("df_window columns:", df_window.columns.tolist())
    print("df_window head:")
    print(df_window.head())
    
    mret = df_window['net_return'].dropna()
    metrics = {}
    if len(mret) == 0:
        metrics['CAGR'] = np.nan
        metrics['Vol'] = np.nan
        metrics['Sharpe'] = np.nan
        metrics['Sortino'] = np.nan
        metrics['MaxDD'] = np.nan
        metrics['Alpha'] = np.nan
        metrics['Beta'] = np.nan
        metrics['InfoRatio'] = np.nan
        metrics['UpsideCapture'] = np.nan
        metrics['DownsideCapture'] = np.nan
        return metrics
    def ann_return(mret):
        return (np.prod(1 + mret) ** (12.0 / len(mret)) - 1)
    def ann_vol(mret):
        return np.std(mret, ddof=1) * np.sqrt(12) if len(mret) > 1 else np.nan
    def max_drawdown(cum):
        roll_max = cum.cummax()
        return (cum / roll_max - 1).min()
    metrics['CAGR'] = ann_return(mret)
    metrics['Vol'] = ann_vol(mret)
    metrics['Sharpe'] = (np.mean(mret)/np.std(mret, ddof=1))*np.sqrt(12) if len(mret)>1 and np.std(mret, ddof=1)>0 else np.nan
    # Sortino Ratio: mean return / downside deviation (annualized)
    downside = mret[mret < 0]
    downside_std = np.std(downside, ddof=1) if len(downside) > 1 else np.nan
    metrics['Sortino'] = (np.mean(mret)/downside_std)*np.sqrt(12) if len(mret)>1 and downside_std and downside_std>0 else np.nan
    metrics['MaxDD'] = max_drawdown((1+mret).cumprod())
    # Calculate Alpha, Beta, Info Ratio, Upside/Downside Capture if Benchmark returns are available
    metrics['Alpha'] = np.nan
    metrics['Beta'] = np.nan
    metrics['InfoRatio'] = np.nan
    metrics['UpsideCapture'] = np.nan
    metrics['DownsideCapture'] = np.nan
    if 'Benchmark' in df_window.columns:
        port_ret = df_window['net_return'].dropna()
        bench = df_window['Benchmark'].dropna()
        bench_ret = bench.pct_change().dropna()
        port_ret = port_ret.reindex(bench_ret.index).dropna()
        bench_ret = bench_ret.reindex(port_ret.index).dropna()
        print(f"port_ret length: {len(port_ret)}, bench_ret length: {len(bench_ret)}")
        print(f"port_ret index: {port_ret.index[:5].tolist()} ... {port_ret.index[-5:].tolist() if len(port_ret) > 5 else port_ret.index.tolist()}")
        print(f"bench_ret index: {bench_ret.index[:5].tolist()} ... {bench_ret.index[-5:].tolist() if len(bench_ret) > 5 else bench_ret.index.tolist()}")
        print(f"port_ret head: {port_ret.head().values}")
        print(f"bench_ret head: {bench_ret.head().values}")
        if len(port_ret) > 1 and len(bench_ret) > 1:
            # Linear regression: port_ret = alpha + beta * bench_ret
            X = bench_ret.values.reshape(-1, 1)
            Y = port_ret.values
            from sklearn.linear_model import LinearRegression
            reg = LinearRegression().fit(X, Y)
            metrics['Alpha'] = reg.intercept_
            metrics['Beta'] = reg.coef_[0]
            # Info Ratio
            active_ret = port_ret - bench_ret
            if np.std(active_ret, ddof=1) > 0:
                metrics['InfoRatio'] = np.mean(active_ret) / np.std(active_ret, ddof=1) * np.sqrt(12)
            # Upside/Downside Capture
            bench_up = bench_ret > 0
            bench_down = bench_ret < 0
            if bench_up.any():
                metrics['UpsideCapture'] = port_ret[bench_up].mean() / bench_ret[bench_up].mean() if bench_ret[bench_up].mean() != 0 else np.nan
            if bench_down.any():
                metrics['DownsideCapture'] = port_ret[bench_down].mean() / bench_ret[bench_down].mean() if bench_ret[bench_down].mean() != 0 else np.nan
    # DrawdownCapture removed from metrics
    return metrics
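
# Self-contained sanity check for the annualization used above (synthetic returns,
# illustration only; not part of the report pipeline):
def _example_metrics_window() -> dict:
    """Twelve months of +1.0% should yield CAGR ~= 1.01**12 - 1 ~= 12.68% and MaxDD == 0."""
    mret = pd.Series([0.01] * 12)
    return _compute_metrics_for_window(pd.DataFrame({'net_return': mret}))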


def _prepare_returns(df: pd.DataFrame, mgmt_monthly: float = 0.01/12.0) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    benchmark_series = None
    df = df.copy()
    df.columns = [c.strip() for c in df.columns]
    # Extract benchmark values from BM rows and robustly align for charting
    if 'portfolio_id' in df.columns and 'symbol' in df.columns:
        bm_rows = df[df['portfolio_id'] == 'BM'].copy()
        print("\n--- Benchmark Extraction Diagnostics ---")
        print("bm_rows shape:", bm_rows.shape)
        print("bm_rows columns:", bm_rows.columns.tolist())
        print("bm_rows head:")
        print(bm_rows.head(10))
        # Normalize dates for benchmark rows
        if 'date' in bm_rows.columns:
            bm_rows['date'] = pd.to_datetime(bm_rows['date']).dt.normalize()
            # Prefer 'price' if available, else 'mv', else 'total_value'
            if 'price' in bm_rows.columns and bm_rows['price'].notna().any():
                benchmark_series = bm_rows.set_index('date')['price'].astype(float)
            elif 'mv' in bm_rows.columns and bm_rows['mv'].notna().any():
                benchmark_series = bm_rows.set_index('date')['mv'].astype(float)
            elif 'total_value' in bm_rows.columns and bm_rows['total_value'].notna().any():
                benchmark_series = bm_rows.set_index('date')['total_value'].astype(float)
            else:
                print("Warning: No valid price/mv/total_value found in benchmark rows.")
            # Print extracted benchmark_series
            if benchmark_series is not None:
                print("Extracted benchmark_series:")
                print(benchmark_series)
                print("benchmark_series type:", type(benchmark_series))
                print("benchmark_series head:")
                print(benchmark_series.head(10))
                print("benchmark_series tail:")
                print(benchmark_series.tail(10))
                print("benchmark_series index:")
                print(benchmark_series.index)
                # Diagnostic: Print any NaN in benchmark_series
                nan_bench = benchmark_series[benchmark_series.isna()]
                if not nan_bench.empty:
                    print("DIAGNOSTIC: NaN values found in benchmark_series at these dates:")
                    print(nan_bench)
                unique_vals = benchmark_series.unique() if hasattr(benchmark_series, 'unique') else []
                if len(unique_vals) <= 1:
                    print(f"Warning: Benchmark series is flat or has only one value: {unique_vals}. Mountain chart will be flat.")
                    benchmark_series = None
                else:
                    print(f"Benchmark series has {len(unique_vals)} unique values.")
            else:
                print("Warning: benchmark_series is None after extraction.")
    # Exact symbol-to-name mapping for only symbols present in the DataFrame
    # Build mapping from a user-provided list or from the DataFrame itself
    symbol_list = df['symbol'].unique().tolist() if 'symbol' in df.columns else []
    import json
    symbol_names_path = os.path.join(DEFAULT_OUTPUT_DIR, "symbol_names.json")
    user_symbol_names = None
    # Try to load symbol names from file first
    if os.path.isfile(symbol_names_path):
        try:
            with open(symbol_names_path, "r", encoding="utf-8") as f:
                user_symbol_names = json.load(f)
        except Exception:
            user_symbol_names = None
    # If not loaded, try to build in QC and save
    if user_symbol_names is None:
        try:
            from QuantConnect.Research import QuantBook
            qb = QuantBook()
            if symbol_list and hasattr(qb, 'SymbolPropertiesDatabase'):
                symbols = []
                for sym in symbol_list:
                    try:
                        symbols.append(qb.Symbol(sym))
                    except Exception:
                        pass
                if symbols:
                    user_symbol_names = get_symbol_name_dict(qb, symbols)
                else:
                    user_symbol_names = {sym: sym for sym in symbol_list}
                # Save to file for future use
                try:
                    os.makedirs(DEFAULT_OUTPUT_DIR, exist_ok=True)
                    with open(symbol_names_path, "w", encoding="utf-8") as f:
                        json.dump(user_symbol_names, f, ensure_ascii=False, indent=2)
                except Exception:
                    pass
            else:
                user_symbol_names = {sym: sym for sym in symbol_list}
        except Exception:
            user_symbol_names = {sym: sym for sym in symbol_list}
    # Ensure every symbol in symbol_list is mapped in user_symbol_names
    for sym in symbol_list:
        if sym not in user_symbol_names:
            user_symbol_names[sym] = sym

    # Guarantee every symbol in holdings has a name
    def ensure_all_names_mapped(holdings, user_symbol_names):
        missing = []
        for sym in holdings.index:
            if sym not in user_symbol_names or not user_symbol_names[sym] or user_symbol_names[sym] == sym:
                missing.append(sym)
                user_symbol_names[sym] = sym
        if missing:
            print(f"Warning: No human-readable name found for symbols: {missing}. Using symbol as name.")
        return user_symbol_names
    # Add 'name' column to every row using user mapping
    if 'symbol' in df.columns:
        df['name'] = df['symbol'].map(lambda sym: user_symbol_names.get(sym, sym))
    # Parse date
    try:
        if 'time' in df.columns:
            df['date'] = pd.to_datetime(df['time']).dt.normalize()
        elif 'date' in df.columns:
            df['date'] = pd.to_datetime(df['date']).dt.normalize()
        else:
            # Print columns and first few rows for diagnosis
            print('Could not find date or time column. Columns:', df.columns.tolist())
            print('First 5 rows:', df.head())
            raise ValueError('No valid date column found. Please check your CSV structure.')
    except Exception as e:
        print('Date parsing error:', e)
        print('Columns:', df.columns.tolist())
        print('First 5 rows:', df.head())
        raise
    # Determine totals by date (prefer explicit total_value, else sum mv for EOM rows)
    total_by_date = None
    if 'total_value' in df.columns and df['total_value'].notna().any():
        tmp = df[['date','total_value']].dropna()
        if not tmp.empty:
            total_by_date = tmp.groupby('date')['total_value'].last().astype(float)
    if total_by_date is None or total_by_date.empty:
        # Try EOM_POSITION rows
        if 'event_type' in df.columns:
            eom = df[df['event_type'] == 'EOM_POSITION']
            if not eom.empty and 'mv' in eom.columns:
                total_by_date = eom.groupby('date')['mv'].sum().astype(float)
        if total_by_date is None or total_by_date.empty:
            # Fallback: sum mv across all rows per date
            if 'mv' in df.columns:
                total_by_date = df.groupby('date')['mv'].sum().astype(float)
    if total_by_date is None or total_by_date.empty:
        raise ValueError("Could not compute portfolio totals (need 'total_value' or 'mv').")
    total_by_date = total_by_date.sort_index()
    pv = total_by_date[~total_by_date.index.duplicated(keep='last')]
    gross_ret = pv.pct_change().dropna()
    net_ret = (1 + gross_ret) * (1 - mgmt_monthly) - 1
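    # Fee compounding example: a +1.00% gross month at 100 bps/yr (~0.0833%/mo) nets
    # (1.01) * (1 - 0.000833) - 1 ~= +0.916%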
    rets_df = pd.DataFrame({
        'total_value': pv,
        'gross_return': gross_ret.reindex(pv.index),
        'net_return': net_ret.reindex(pv.index)
    }).ffill()
    # Assign benchmark price series to rets_df['Benchmark'] before any calculations
    # Filter to valid portfolio dates first
    rets_df = rets_df.loc[rets_df['gross_return'].notna()]
    initial_value = 100000
    # Diagnostic: Confirm benchmark_series is valid before assignment block
    print("DIAGNOSTIC: Entering benchmark assignment block. benchmark_series is None?", benchmark_series is None)
    if benchmark_series is not None:
        # Normalize, deduplicate, and sort both indices
        rets_df.index = pd.to_datetime(rets_df.index).normalize()
        benchmark_series.index = pd.to_datetime(benchmark_series.index).normalize()
        rets_df = rets_df[~rets_df.index.duplicated(keep='first')].sort_index()
        benchmark_series = benchmark_series[~benchmark_series.index.duplicated(keep='first')].sort_index()
        # Only filter out portfolio dates before first available benchmark date
        first_bench_date = benchmark_series.dropna().index.min()
        rets_df = rets_df.loc[rets_df.index >= first_bench_date]
        # Diagnostics: Print dtypes and index values before assignment
        print("DIAGNOSTIC: rets_df index dtype:", type(rets_df.index))
        print("DIAGNOSTIC: benchmark_series index dtype:", type(benchmark_series.index))
        print("DIAGNOSTIC: rets_df index values:", list(rets_df.index))
        print("DIAGNOSTIC: benchmark_series index values:", list(benchmark_series.index))
        # Assign benchmark values to all portfolio dates using forward-fill
        aligned_bench = benchmark_series.reindex(rets_df.index, method='pad')
        # If any NaNs remain (e.g., at the start), fill with first available value
        if aligned_bench.isna().any():
            first_val = benchmark_series.dropna().iloc[0]
            aligned_bench = aligned_bench.fillna(first_val)
        rets_df['Benchmark'] = aligned_bench
        # Always print first and last 10 values of rets_df['Benchmark'] after assignment
        print("DIAGNOSTIC: First 10 values of rets_df['Benchmark'] after assignment:")
        print(rets_df['Benchmark'].head(10))
        print("DIAGNOSTIC: Last 10 values of rets_df['Benchmark'] after assignment:")
        print(rets_df['Benchmark'].tail(10))
        # Print any NaNs in rets_df['Benchmark']
        nan_bench = rets_df['Benchmark'][rets_df['Benchmark'].isna()]
        if not nan_bench.empty:
            print("DIAGNOSTIC: NaN values found in rets_df['Benchmark'] at these dates:")
            print(nan_bench)
        # Always calculate cum_net and cum_bench for mountain chart using synchronized start
        rets_df['cum_net'] = initial_value * (1 + rets_df['net_return']).cumprod()
        bench_ret = rets_df['Benchmark'].pct_change().fillna(0)
        rets_df['cum_bench'] = initial_value * (1 + bench_ret).cumprod()
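        # Rebasing: the benchmark line is rebuilt from its own returns starting at the same
        # initial_value, so portfolio and benchmark are directly comparable on one axis
        # regardless of the benchmark's absolute price level.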
        rets_df['cum_bench'] = rets_df['cum_bench'].ffill().bfill()
        print('cum_bench head (after fix):')
        print(rets_df['cum_bench'].head(10))
        print('cum_bench tail (after fix):')
        print(rets_df['cum_bench'].tail(10))
        # Print any NaNs in rets_df['cum_bench']
        nan_cum_bench = rets_df['cum_bench'][rets_df['cum_bench'].isna()]
        if not nan_cum_bench.empty:
            print("DIAGNOSTIC: NaN values found in rets_df['cum_bench'] at these dates:")
            print(nan_cum_bench)
    else:
        rets_df['cum_net'] = initial_value * (1 + rets_df['net_return']).cumprod()
        rets_df['Benchmark'] = np.nan
        rets_df['cum_bench'] = np.nan

    # Diagnostics: print first and last 10 values of cum_net and net_return
    print("Diagnostics: First 10 values of cum_net and net_return")
    print(rets_df[['cum_net', 'net_return']].head(10))
    print("cum_net tail:")
    print(rets_df['cum_net'].tail(10))
    print("net_return tail:")
    print(rets_df['net_return'].tail(10))
    # Do NOT overwrite benchmark_series or rets_df['Benchmark'] again
    # All alignment and cum_bench calculation is already done above
    # Calculate drawdown after benchmark is aligned
    def calc_drawdown(series):
        roll_max = series.cummax()
        return (series / roll_max - 1)
    if 'cum_net' in rets_df.columns:
        rets_df['MaxDD'] = calc_drawdown(rets_df['cum_net'])
    if 'Benchmark' in rets_df.columns:
        rets_df['BenchmarkDrawdown'] = calc_drawdown(rets_df['Benchmark'])
    # Holdings snapshot for last date (from EOM_POSITION if present)
    holdings = pd.DataFrame()
    if 'portfolio_id' in df.columns and 'qty' in df.columns and 'mv' in df.columns and 'symbol' in df.columns:
        last_date = df['date'].max()
        eom_holdings = df[(df['portfolio_id'] == 'MP') & (df['qty'] > 0) & (df['mv'] > 0) & (df['date'] == last_date)]
        cols = [c for c in ['symbol','qty','weight','price','mv'] if c in eom_holdings.columns]
        if not eom_holdings.empty and cols:
            holdings = eom_holdings[cols].drop_duplicates(subset=['symbol']).set_index('symbol')
        # Always ensure 'name' column is present and correct for every symbol
        if not holdings.empty:
            def map_name(sym):
                name = user_symbol_names.get(sym)
                if name is None:
                    print(f"Warning: No name found for symbol '{sym}' in user_symbol_names. Using symbol as name.")
                    return sym
                return name
            holdings['name'] = holdings.index.map(map_name)
    # Monthly matrix (last 10 years)
    period_months = 120
    pr = rets_df['net_return'].tail(period_months).reset_index().rename(columns={'index':'date'})
    pr['Year'] = pr['date'].dt.year
    pr['Month'] = pr['date'].dt.strftime('%b')
    pivot = pr.pivot(index='Year', columns='Month', values='net_return')
    month_order = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
    existing_months = [m for m in month_order if m in pivot.columns]
    pivot = pivot[existing_months].fillna("")
    return rets_df, holdings, pivot
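
# Minimal input sketch for _prepare_returns (column names taken from the code above;
# the values are toy data for illustration, not real portfolio output):
#
#   toy = pd.DataFrame({
#       'date':         ['2024-01-31', '2024-02-29'] * 2,
#       'portfolio_id': ['MP', 'MP', 'BM', 'BM'],
#       'symbol':       ['SPY', 'SPY', 'SPY', 'SPY'],
#       'qty':          [100, 100, 0, 0],
#       'mv':           [48000.0, 50000.0, np.nan, np.nan],
#       'total_value':  [100000.0, 104000.0, np.nan, np.nan],
#       'price':        [np.nan, np.nan, 480.0, 500.0],
#   })
#   rets_df, holdings, pivot = _prepare_returns(toy)  # rets_df carries gross/net returns + Benchmark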


def _write_excel(rets_df: pd.DataFrame, holdings: pd.DataFrame, pivot: pd.DataFrame, output_dir: str) -> str:
    os.makedirs(output_dir, exist_ok=True)
    excel_out = os.path.join(output_dir, "MP_Performance_Report_Results.xlsx")
    metrics_table = {}
    periods = [1,3,5,7,10]
    for h in periods:
        months = h * 12
        window = rets_df.tail(months)
        # DrawdownCapture is intentionally excluded from the metric set
        metrics_table[h] = _compute_metrics_for_window(window) if len(window) > 0 else {k: np.nan for k in ['CAGR','Vol','Sharpe','Sortino','MaxDD','Alpha','Beta','InfoRatio','UpsideCapture','DownsideCapture']}
    with pd.ExcelWriter(excel_out, engine='xlsxwriter') as writer:
        workbook = writer.book
        # Company header and disclosure format
        header_format = workbook.add_format({'bold': True, 'font_color': '#003366', 'font_size': 16})
        subheader_format = workbook.add_format({'font_color': '#003366', 'font_size': 12})
        disclosure_format = workbook.add_format({'font_color': 'gray', 'font_size': 8, 'italic': True})
        # Print-friendly summary sheet
        summary_sheet = 'Institutional_Summary'
        worksheet = workbook.add_worksheet(summary_sheet)
        writer.sheets[summary_sheet] = worksheet
        worksheet.write('A1', 'alphaNavigator', header_format)
        worksheet.write('A2', 'Management Style: Dynamic Equity', subheader_format)
        worksheet.write('F1', 'Company Name', header_format)
        worksheet.write('A3', 'Disclosure: For institutional use only. Past performance is not indicative of future results. See full disclosures at end.', disclosure_format)
        # Summary boxes
        box_labels = [f"Last {h} Year{'s' if h > 1 else ''}" for h in periods]
        metrics_keys = ['CAGR','MaxDD','Sharpe','Sortino','UpsideCapture','DownsideCapture','Alpha','Beta','InfoRatio']
        metrics_display_names = ['CAGR','MaxDD','Sharpe','Sortino','Upside Capture','Downside Capture','Alpha','Beta','Info Ratio']
        for i, h in enumerate(periods):
            m = metrics_table.get(h, {})
            box_row = 5 + i*10
            worksheet.write(box_row, 0, box_labels[i], header_format)
            for j, key in enumerate(metrics_keys):
                val = m.get(key, np.nan)
                # Display Sharpe and Sortino as decimals, others as % where appropriate
                if key in ['Sharpe','Sortino','Alpha','Beta','InfoRatio']:
                    val_str = f"{val:.2f}" if isinstance(val, float) and val==val else "N/A"
                else:
                    val_str = f"{val:.2%}" if isinstance(val, float) and val==val else "N/A"
                worksheet.write(box_row+1+j, 0, metrics_display_names[j], subheader_format)
                worksheet.write(box_row+1+j, 1, val_str)
        # Mountain Equity vs Benchmark chart
        print("rets_df columns before charting:", rets_df.columns)
        rets_df_reset = rets_df.reset_index()
        print("rets_df_reset columns before charting:", rets_df_reset.columns)
        rets_df_reset.to_excel(writer, sheet_name='Monthly_Net_Returns', index=False)
        date_col = rets_df_reset.columns.get_loc('date')
        cum_net_col = rets_df_reset.columns.get_loc('cum_net')
        chart1 = workbook.add_chart({'type': 'line'})
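        # xlsxwriter list-form ranges are [sheet, first_row, first_col, last_row, last_col];
        # row 0 holds the header written by to_excel, so series data starts at row 1.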
        chart1.add_series({
            'name': 'Portfolio',
            'categories': ['Monthly_Net_Returns', 1, date_col, len(rets_df_reset), date_col],
            'values': ['Monthly_Net_Returns', 1, cum_net_col, len(rets_df_reset), cum_net_col]
        })
        # Use cum_bench for benchmark line if available
        if 'cum_bench' in rets_df_reset.columns:
            cum_bench_col = rets_df_reset.columns.get_loc('cum_bench')
            print("cum_bench head (for chart):")
            print(rets_df_reset['cum_bench'].head(10))
            print("cum_bench tail (for chart):")
            print(rets_df_reset['cum_bench'].tail(10))
            chart1.add_series({
                'name': 'Benchmark',
                'categories': ['Monthly_Net_Returns', 1, date_col, len(rets_df_reset), date_col],
                'values': ['Monthly_Net_Returns', 1, cum_bench_col, len(rets_df_reset), cum_bench_col]
            })
        else:
            print("cum_bench column missing for charting!")
            chart1.add_series({
                'name': 'Benchmark',
                'categories': ['Monthly_Net_Returns', 1, date_col, 1, date_col],
                'values': ['Monthly_Net_Returns', 1, date_col, 1, date_col]
            })
        chart1.set_title({'name': 'Growth of $100,000 — Portfolio vs Benchmark'})
        chart1.set_x_axis({'name': 'Date'})
        chart1.set_y_axis({'name': 'Cumulative Value'})
        worksheet.insert_chart('C5', chart1)
        # Drawdown vs Benchmark chart (always plot both)
        chart2 = workbook.add_chart({'type': 'line'})
        # Column positions must come from rets_df_reset (the frame actually written to the
        # sheet), which includes the reset 'date' column at position 0.
        maxdd_col = rets_df_reset.columns.get_loc('MaxDD')
        chart2.add_series({'name': 'Portfolio Drawdown', 'categories': ['Monthly_Net_Returns', 1, date_col, len(rets_df_reset), date_col], 'values': ['Monthly_Net_Returns', 1, maxdd_col, len(rets_df_reset), maxdd_col]})
        if 'BenchmarkDrawdown' in rets_df_reset.columns:
            bench_dd_col = rets_df_reset.columns.get_loc('BenchmarkDrawdown')
            chart2.add_series({'name': 'Benchmark Drawdown', 'categories': ['Monthly_Net_Returns', 1, date_col, len(rets_df_reset), date_col], 'values': ['Monthly_Net_Returns', 1, bench_dd_col, len(rets_df_reset), bench_dd_col]})
        else:
            chart2.add_series({'name': 'Benchmark Drawdown', 'categories': ['Monthly_Net_Returns', 1, date_col, 1, date_col], 'values': ['Monthly_Net_Returns', 1, date_col, 1, date_col]})
        chart2.set_title({'name': 'Drawdown — Portfolio vs Benchmark'})
        chart2.set_x_axis({'name': 'Date'})
        chart2.set_y_axis({'name': 'Drawdown'})
        worksheet.insert_chart('C22', chart2)
        # Metrics table (show all periods: 1, 3, 5, 7, 10 years)
        pd.DataFrame(metrics_table).T.to_excel(writer, sheet_name='Period_Metrics', index=True)
        # Holdings table with 'name' column for every symbol
        if holdings is not None and isinstance(holdings, pd.DataFrame) and not holdings.empty:
            hold_tbl = holdings.copy()
            # Ensure 'name' column exists and is printed in CSV
            if 'name' not in hold_tbl.columns:
                hold_tbl['name'] = hold_tbl.index
            # Move 'name' column to be right after 'symbol' for clarity
            hold_tbl = hold_tbl.reset_index()
            cols = list(hold_tbl.columns)
            if 'name' in cols and 'symbol' in cols:
                name_idx = cols.index('name')
                symbol_idx = cols.index('symbol')
                # Remove 'name' and insert after 'symbol'
                cols.pop(name_idx)
                cols.insert(symbol_idx + 1, 'name')
                hold_tbl = hold_tbl[cols]
            hold_tbl.to_excel(writer, sheet_name='EOM_Holdings', index=False)
            # Print log output with names
            print('Holdings snapshot:')
            print(hold_tbl.to_string(index=False))
        else:
            pd.DataFrame().to_excel(writer, sheet_name='EOM_Holdings')
        # Monthly matrix
        pivot.to_excel(writer, sheet_name='Monthly_Matrix', index=True)
    return excel_out


def _write_pdf(rets_df: pd.DataFrame, holdings: pd.DataFrame, pivot: pd.DataFrame, output_dir: str) -> str:
    os.makedirs(output_dir, exist_ok=True)
    pdf_out = r"C:/Users/johnj/OneDrive/Desktop/Python3/QC_CSV/output/MP_Performance_Report_Mock.pdf"
    metrics_table = {}
    periods = [1,3,5,7,10]
    for h in periods:
        months = h * 12
        window = rets_df.tail(months)
        # DrawdownCapture is intentionally excluded from the metric set
        metrics_table[h] = _compute_metrics_for_window(window) if len(window) > 0 else {k: np.nan for k in ['CAGR','Vol','Sharpe','Sortino','MaxDD','Alpha','Beta','InfoRatio','UpsideCapture','DownsideCapture']}
    last_date = rets_df.index.max() if len(rets_df.index) else datetime.utcnow()
    # Prepare metrics_display and row_labels before PDF rendering
    metrics_df = pd.DataFrame(metrics_table).T
    if 'Vol' in metrics_df.columns:
        metrics_df = metrics_df.drop(columns=['Vol'])
    col_order = ['CAGR','MaxDD','Sharpe','Sortino','UpsideCapture','DownsideCapture','Alpha','Beta','InfoRatio']
    metrics_df = metrics_df[[c for c in col_order if c in metrics_df.columns]]
    metrics_display = metrics_df.copy()
    for c in ['CAGR','MaxDD','UpsideCapture','DownsideCapture']:
        try:
            metrics_display[c] = metrics_display[c].apply(lambda x: f"{x:.2%}" if x==x else "N/A")
        except Exception:
            pass
    for c in ['Sharpe','Sortino','Alpha','Beta','InfoRatio']:
        try:
            metrics_display[c] = metrics_display[c].apply(lambda x: f"{x:.2f}" if x==x else "N/A")
        except Exception:
            pass
    period_labels = {1: '1 YR', 3: '3 YRs', 5: '5 YRs', 7: '7 YRs', 10: '10 YRs'}
    row_labels = [period_labels.get(h, str(h)) for h in metrics_display.index]

    fig_written = False
    with PdfPages(pdf_out) as pdf:
        # Basic summary table
        fig, ax = plt.subplots(figsize=(8.5, 11))
        ax.axis('off')
        # Table: metrics_display
        # Use colLabels/rowLabels so each period row is labelled (row_labels built above)
        table = ax.table(cellText=metrics_display.values.tolist(),
                         colLabels=metrics_display.columns.tolist(),
                         rowLabels=row_labels, loc='center')
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        table.scale(1, 2)
        pdf.savefig(fig)
        plt.close(fig)
        fig_written = True

        # Basic line chart: Portfolio and Benchmark
        fig2, ax2 = plt.subplots(figsize=(8.5, 5))
        if 'cum_net' in rets_df.columns:
            ax2.plot(rets_df.index, rets_df['cum_net'], label='Portfolio')
        if 'cum_bench' in rets_df.columns:
            ax2.plot(rets_df.index, rets_df['cum_bench'], label='Benchmark')
        ax2.legend()
        ax2.set_title('Growth of $100,000')
        pdf.savefig(fig2)
        plt.close(fig2)
        fig_written = True

        if not fig_written:
            fig_diag = plt.figure(figsize=(8.5, 11))
            plt.text(0.5, 0.5, 'DIAGNOSTIC: No figures were written to this PDF.', ha='center', va='center', fontsize=16)
            plt.axis('off')
            pdf.savefig(fig_diag)
            plt.close(fig_diag)
            print('DIAGNOSTIC: No figures were written to the PDF, added diagnostic page.')
        # Monthly returns bar chart: strategy vs benchmark (inputs derived from rets_df)
        strat_monthly = rets_df['net_return'].dropna()
        all_months = strat_monthly.index.to_period('M')
        bench_monthly = None
        if 'Benchmark' in rets_df.columns and rets_df['Benchmark'].notna().any():
            bench_monthly = rets_df['Benchmark'].pct_change().reindex(strat_monthly.index)
        fig3, ax3 = plt.subplots(figsize=(8.5, 5))
        bar_width = 0.4
        x = np.arange(len(all_months))
        ax3.bar(x - bar_width/2, strat_monthly.values, width=bar_width, label='Strategy', color='#0055a4')
        if bench_monthly is not None:
            ax3.bar(x + bar_width/2, bench_monthly.values, width=bar_width, label='Benchmark', color='#ff9900')
        ax3.set_xticks(x)
        ax3.set_xticklabels([str(m) for m in all_months], rotation=90, fontsize=8)
        ax3.set_ylabel('Monthly Return')
        ax3.legend()
        fig3.tight_layout(rect=[0, 0.03, 1, 0.95])
        pdf.savefig(fig3); plt.close(fig3)
    return pdf_out


def run_report(key: Optional[str] = None, output_dir: str = DEFAULT_OUTPUT_DIR, local_csv_path: Optional[str] = None,
               mgmt_monthly: float = 0.01/12.0) -> Tuple[str, str]:
    """Generate Excel and PDF reports from ObjectStore (QC) or a local CSV file.

    - If local_csv_path is provided and exists, that file is used.
    - Else, attempts to read from ObjectStore in QC Research using latest key or timestamped key.
    Returns (excel_path, pdf_path).
    """
    os.makedirs(output_dir, exist_ok=True)
    # Load CSV text
    text = None
    # Default local path for CSV
    default_local_csv = os.path.expanduser(r'C:/Users/johnj/Desktop/Python3/QC CSV/eom_performance.csv')
    if local_csv_path and os.path.isfile(local_csv_path):
        with open(local_csv_path, 'r', encoding='utf-8', errors='replace') as f:
            text = f.read()
    elif os.path.isfile(default_local_csv):
        print(f"Reading local CSV from: {default_local_csv}")
        with open(default_local_csv, 'r', encoding='utf-8', errors='replace') as f:
            text = f.read()
    else:
        qb = _get_qb()
        if qb is None:
            raise RuntimeError("QuantBook not available and no local_csv_path provided, and default local CSV not found.")
        if not key:
            key = _find_best_perf_key(qb)
        if not key:
            raise RuntimeError("No ObjectStore perf key found (perf/latest.csv or timestamped perf_YYYYMMDD_HHMMSS).")
        print("Selected ObjectStore key:", key)
        text = _read_objectstore_text(qb, key)
        if not text:
            raise RuntimeError(f"Failed to read ObjectStore key: {key}")
    # Parse CSV
    import tempfile
    # Clean double quotes if present
    with tempfile.NamedTemporaryFile(delete=False, mode='w', encoding='utf-8', newline='') as tmpfile:
        for line in text.splitlines():
            tmpfile.write(line.replace('"', '') + '\n')
        clean_csv_path = tmpfile.name
    df = pd.read_csv(clean_csv_path)
    os.remove(clean_csv_path)
    rets_df, holdings, pivot = _prepare_returns(df, mgmt_monthly=mgmt_monthly)
    # Write outputs
    excel_out = _write_excel(rets_df, holdings, pivot, output_dir)
    pdf_out = _write_pdf(rets_df, holdings, pivot, output_dir)
    # Write EOM holdings as CSV (one row per position, separate columns)
    eom_csv_out = _write_eom_csv(holdings, output_dir)
    # Save as Base64 text files for download
    import base64
    def save_as_base64(input_path, output_path):
        with open(input_path, 'rb') as f:
            b64 = base64.b64encode(f.read()).decode('utf-8')
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(b64)
    excel_b64 = os.path.join(output_dir, "MP_Performance_Report_Results.xlsx.b64.txt")
    pdf_b64 = os.path.join(output_dir, "MP_Performance_Report_Mock.pdf.b64.txt")
    save_as_base64(excel_out, excel_b64)
    save_as_base64(pdf_out, pdf_b64)
    # Optionally save EOM CSV as base64 if needed
    eom_csv_b64 = os.path.join(output_dir, "MP_EOM_Holdings.csv.b64.txt")
    save_as_base64(eom_csv_out, eom_csv_b64)
    print("Wrote files to:", os.path.abspath(output_dir))
    print(" Excel:", excel_out)
    print(" PDF  :", pdf_out)
    print(" EOM CSV:", eom_csv_out)
    print(" Excel Base64:", excel_b64)
    print(" PDF Base64:", pdf_b64)
    print(" EOM CSV Base64:", eom_csv_b64)
    return excel_out, pdf_out
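
# Example invocation from a QC Research notebook (the explicit key is an assumption;
# by default the newest perf/*.csv export is auto-detected):
#
#   excel_path, pdf_path = run_report()                       # auto-detect latest key
#   excel_path, pdf_path = run_report(key='perf/latest.csv')  # or pin a specific export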


if __name__ == "__main__":
    import argparse
    p = argparse.ArgumentParser(description="Generate MP performance reports (Excel/PDF) from CSV in ObjectStore or local path.")
    p.add_argument('--key', type=str, default=None, help='ObjectStore key (default: auto-detect latest/timestamped)')
    p.add_argument('--output-dir', type=str, default=DEFAULT_OUTPUT_DIR)
    p.add_argument('--local-csv', type=str, default=None, help='Use a local CSV file instead of ObjectStore')
    p.add_argument('--mgmt-bps', type=float, default=100.0, help='Annual management fee in basis points (default 100 bps = 1.00%%/yr)')
    args = p.parse_args()
    monthly_fee = (args.mgmt_bps / 10000.0) / 12.0
    # Use 'tools/eom_performance.csv' as the default ObjectStore key if none is provided
    report_key = args.key if args.key else "tools/eom_performance.csv"
    output_dir = args.output_dir if args.output_dir else "project/report_output"
    run_report(key=report_key, output_dir=output_dir, local_csv_path=args.local_csv, mgmt_monthly=monthly_fee)
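    # Example command lines (paths and values are illustrative):
    #   python generate_mp_report.py --local-csv ./eom_performance.csv --output-dir ./report_output
    #   python generate_mp_report.py --mgmt-bps 75   # 0.75%/yr fee (75 bps)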