Overall Statistics

Total Orders: 581
Average Win: 0.39%
Average Loss: -0.15%
Compounding Annual Return: -3.186%
Drawdown: 22.200%
Expectancy: -0.068
Start Equity: 100000
End Equity: 91145.95
Net Profit: -8.854%
Sharpe Ratio: -0.289
Sortino Ratio: -0.367
Probabilistic Sharpe Ratio: 1.456%
Loss Rate: 74%
Win Rate: 26%
Profit-Loss Ratio: 2.63
Alpha: -0.013
Beta: -0.896
Annual Standard Deviation: 0.145
Annual Variance: 0.021
Information Ratio: -0.262
Tracking Error: 0.284
Treynor Ratio: 0.047
Total Fees: $620.21
Estimated Strategy Capacity: $680000000.00
Lowest Capacity Asset: SPY R735QTJ8XC9X
Portfolio Turnover: 6.45%
#region imports
from AlgorithmImports import *
import tensorflow as tf
#endregion


class SwimmingFluorescentYellowCormorant(QCAlgorithm):
    """Daily SPY long/short strategy driven by a small Keras regression model.

    The model maps the last 5 close-price differences to a predicted price
    change for the next bar; a positive prediction goes fully long, any other
    prediction goes fully short. The model is retrained weekly on a rolling
    window of the last 500 daily closes.
    """

    def Initialize(self):
        self.SetStartDate(2021, 6, 22)  # Set Start Date
        self.SetCash(100000)            # Set Strategy Cash
        self.symbol = self.AddEquity("SPY", Resolution.Daily).Symbol

        # Network hyper-parameters.
        num_factors = 5    # number of lagged-difference features per sample
        num_neurons_1 = 10
        num_neurons_2 = 10
        num_neurons_3 = 5
        self.epochs = 100
        self.learning_rate = 0.0001

        self.model = tf.keras.Sequential([
            tf.keras.layers.Dense(num_neurons_1, activation=tf.nn.relu,
                                  input_shape=(num_factors,)),  # input shape required
            tf.keras.layers.Dense(num_neurons_2, activation=tf.nn.relu),
            tf.keras.layers.Dense(num_neurons_3, activation=tf.nn.relu),
            tf.keras.layers.Dense(1)
        ])

        # Warm up the rolling window from history so the first training call
        # and the first OnData prediction have a full 500-close window.
        training_length = 500
        self.training_data = RollingWindow[float](training_length)
        history = self.History[TradeBar](self.symbol, training_length, Resolution.Daily)
        for trade_bar in history:
            self.training_data.Add(trade_bar.Close)

        # Train once immediately, then re-train every week at 08:00.
        self.Train(self.my_training_method)
        self.Train(self.date_rules.week_start(), self.time_rules.at(8, 0),
                   self.my_training_method)

    def get_features_and_labels(self, lookback=5):
        """Build (features, labels) numpy arrays from the rolling close window.

        Features: the i-step close differences for i = 1..lookback, one column
        per lag. Labels: `data.diff(-1)`, i.e. close[t] - close[t+1], which is
        the NEGATED next-bar move — NOTE(review): confirm this sign is intended;
        OnData goes long on a positive prediction, which with this labeling
        means "long when the model expects the price to fall".

        :param lookback: number of lagged differences to use as features.
        :return: tuple of (X, Y) numpy arrays aligned by row.
        """
        lookback_series = []
        # RollingWindow iterates newest-first; reverse to chronological order.
        data = pd.Series(list(self.training_data)[::-1])
        for i in range(1, lookback + 1):
            df = data.diff(i)[lookback:-1]
            df.name = f"close-{i}"
            lookback_series.append(df)
        X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()
        Y = data.diff(-1)[lookback:-1].reset_index(drop=True)
        return X.values, Y.values

    def my_training_method(self):
        """Fit the network on the current window with a manual gradient loop."""
        features, labels = self.get_features_and_labels()

        # Define the loss function, we use MSE in this example.
        def loss_mse(target_y, predicted_y):
            # BUGFIX: the model outputs shape (N, 1) while the labels are
            # shape (N,). Subtracting them directly broadcasts to an (N, N)
            # matrix, so the mean was taken over all pairwise differences
            # instead of per-sample errors. Squeeze the prediction to (N,)
            # so the subtraction is element-wise.
            return tf.reduce_mean(
                tf.square(target_y - tf.squeeze(predicted_y, axis=-1)))

        # Train the model: a fresh Adam optimizer per training session, with
        # explicit GradientTape steps instead of model.fit.
        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        for i in range(self.epochs):
            with tf.GradientTape() as t:
                loss = loss_mse(labels, self.model(features))
            jac = t.gradient(loss, self.model.trainable_weights)
            optimizer.apply_gradients(zip(jac, self.model.trainable_weights))

    def OnData(self, data):
        """Update the rolling window and rebalance on the latest prediction."""
        if data.Bars.ContainsKey(self.symbol):
            self.training_data.Add(data.Bars[self.symbol].Close)

        # Re-use the feature builder; only the newest row's prediction is used.
        new_features, __ = self.get_features_and_labels()
        prediction = self.model(new_features)
        prediction = float(prediction.numpy()[-1])

        if prediction > 0:
            self.SetHoldings(self.symbol, 1)
        else:
            self.SetHoldings(self.symbol, -1)