Overall Statistics

Total Orders:                  168
Average Win:                   1.79%
Average Loss:                  -1.06%
Compounding Annual Return:     1.902%
Drawdown:                      26.600%
Expectancy:                    0.075
Start Equity:                  100000
End Equity:                    105494.14
Net Profit:                    5.494%
Sharpe Ratio:                  -0.065
Sortino Ratio:                 -0.077
Probabilistic Sharpe Ratio:    4.338%
Loss Rate:                     60%
Win Rate:                      40%
Profit-Loss Ratio:             1.70
Alpha:                         -0.034
Beta:                          0.781
Annual Standard Deviation:     0.142
Annual Variance:               0.02
Information Ratio:             -0.456
Tracking Error:                0.09
Treynor Ratio:                 -0.012
Total Fees:                    $338.91
Estimated Strategy Capacity:   $1200000000.00
Lowest Capacity Asset:         SPY R735QTJ8XC9X
Portfolio Turnover:            23.25%
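
These results come from a backtest of the algorithm below: a small feed-forward TensorFlow network is trained on daily SPY close-price differences, and the portfolio is held fully long or fully short depending on the sign of the model's latest prediction.
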
#region imports
from AlgorithmImports import *
import tensorflow as tf
#endregion

class SwimmingFluorescentYellowCormorant(QCAlgorithm):

    def initialize(self):
        self.set_start_date(2021, 6, 22)  # Set Start Date
        self.set_cash(100000)  # Set Strategy Cash
        
        self.symbol = self.add_equity("SPY", Resolution.DAILY).symbol

        # Network architecture and training hyperparameters
        num_factors = 5      # one input feature per lookback difference
        num_neurons_1 = 10
        num_neurons_2 = 20
        num_neurons_3 = 5
        self.epochs = 20
        self.learning_rate = 0.0001

        # Feed-forward regression network: three ReLU hidden layers and a
        # single linear output
        self.model = tf.keras.Sequential([
            tf.keras.layers.Dense(num_neurons_1, activation=tf.nn.relu, input_shape=(num_factors,)),  # input shape required on the first layer
            tf.keras.layers.Dense(num_neurons_2, activation=tf.nn.relu),
            tf.keras.layers.Dense(num_neurons_3, activation=tf.nn.relu),
            tf.keras.layers.Dense(1)
        ])

        # Warm up a rolling window with two years of daily closes
        training_length = 252 * 2
        self.training_data = RollingWindow[float](training_length)
        history = self.history[TradeBar](self.symbol, training_length, Resolution.DAILY)
        for trade_bar in history:
            self.training_data.add(trade_bar.close)

        # Fit the model once on the warm-up data
        self.train(self.my_training_method)

    def get_features_and_labels(self, lookback=5):
        lookback_series = []

        # The rolling window iterates newest-first; reverse it into
        # chronological (oldest-first) order
        data = pd.Series(list(self.training_data)[::-1])
        for i in range(1, lookback + 1):
            # Feature i: the i-day close-price difference ending at day t
            df = data.diff(i)[lookback:-1]
            df.name = f"close-{i}"
            lookback_series.append(df)

        X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()
        # Label: close[t] - close[t+1] (note that diff(-1) is current minus next)
        Y = data.diff(-1)[lookback:-1].reset_index(drop=True)
        return X.values, Y.values

    def my_training_method(self):
        features, labels = self.get_features_and_labels()

        # Define the loss function; we use MSE in this example. Squeeze the
        # (N, 1) model output to (N,) so it lines up with the label vector
        # instead of broadcasting the difference to an (N, N) matrix.
        def loss_mse(target_y, predicted_y):
            return tf.reduce_mean(tf.square(target_y - tf.squeeze(predicted_y, axis=-1)))

        # Train the model with a simple custom gradient-descent loop
        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        for _ in range(self.epochs):
            with tf.GradientTape() as t:
                loss = loss_mse(labels, self.model(features))

            gradients = t.gradient(loss, self.model.trainable_weights)
            optimizer.apply_gradients(zip(gradients, self.model.trainable_weights))

    def on_data(self, data):
        if data.bars.contains_key(self.symbol):
            # Keep the rolling window current with today's close
            self.training_data.add(data.bars[self.symbol].close)

            # Score the latest feature row and trade on the sign of the prediction
            new_features, _ = self.get_features_and_labels()
            prediction = self.model(new_features)
            prediction = float(prediction.numpy()[-1])

            if prediction > 0:
                self.set_holdings(self.symbol, 1)
            else:
                self.set_holdings(self.symbol, -1)
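
To make the feature and label construction concrete, here is a minimal standalone sketch of the same windowing logic on a synthetic price series. It runs outside of LEAN with plain pandas/NumPy; the random-walk prices, the seed, and the 30-day length are illustrative assumptions, not data from the backtest.

import numpy as np
import pandas as pd

# Synthetic daily closes, ordered oldest to newest (illustrative only)
rng = np.random.default_rng(0)
data = pd.Series(100 + np.cumsum(rng.normal(0, 1, 30)))

lookback = 5
lookback_series = []
for i in range(1, lookback + 1):
    # Feature i: the i-day close-price difference ending at day t
    df = data.diff(i)[lookback:-1]
    df.name = f"close-{i}"
    lookback_series.append(df)

X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()
# Label: close[t] - close[t+1], matching the algorithm's diff(-1) convention
Y = data.diff(-1)[lookback:-1].reset_index(drop=True)

print(X.shape, Y.shape)  # (24, 5) (24,)

Each row of X holds the last five close differences for one day, and the matching entry of Y is that day's close minus the next day's close, so the network is a small regressor from recent momentum to the next-day price move (with diff(-1)'s current-minus-next sign convention).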