Overall Statistics
Total Orders: 370
Average Win: 0.39%
Average Loss: -0.14%
Compounding Annual Return: -12.109%
Drawdown: 35.600%
Expectancy: -0.433
Start Equity: 100000
End Equity: 79212.26
Net Profit: -20.788%
Sharpe Ratio: -0.906
Sortino Ratio: -1.089
Probabilistic Sharpe Ratio: 0.506%
Loss Rate: 85%
Win Rate: 15%
Profit-Loss Ratio: 2.70
Alpha: -0.042
Beta: -0.969
Annual Standard Deviation: 0.136
Annual Variance: 0.019
Information Ratio: -0.771
Tracking Error: 0.269
Treynor Ratio: 0.127
Total Fees: $376.12
Estimated Strategy Capacity: $960000000.00
Lowest Capacity Asset: SPY R735QTJ8XC9X
Portfolio Turnover: 2.37%
#region imports
from AlgorithmImports import *
import aesara
import aesara.tensor as at
from sklearn.preprocessing import MinMaxScaler
import joblib
#endregion

class AesaraExampleAlgorithm(QCAlgorithm):

    def initialize(self):
        self.set_start_date(2022, 7, 4)
        self.set_cash(100000)
        self.symbol = self.add_equity("SPY", Resolution.DAILY).symbol

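        # Warm up a rolling window with two years of daily SPY bars to use as training data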
        training_length = 252*2
        self.training_data = RollingWindow[TradeBar](training_length)
        history = self.history[TradeBar](self.symbol, training_length, Resolution.DAILY)
        for trade_bar in history:
            self.training_data.add(trade_bar)

        #if self.object_store.contains_key("train") and self.object_store.contains_key("predict"):
        #    train_file_name = self.object_store.get_file_path("train")
        #    predict_file_name = self.object_store.get_file_path("predict")
        #    self._train = joblib.load(train_file_name)
        #    self.predict = joblib.load(predict_file_name)
        #else:
        # Declare Aesara symbolic variables
        x = at.dmatrix("x")
        y = at.dvector("y")

        # initialize the weight vector w randomly
        # this and the following bias variable b
        # are shared so they keep their values
        # between training iterations (updates)
        rng = np.random.default_rng(100)
        w = aesara.shared(rng.standard_normal(5), name="w")
        # initialize the bias term
        b = aesara.shared(0., name="b")

        # Construct Aesara expression graph
        p_1 = 1 / (1 + at.exp(-at.dot(x, w) - b))       # Probability that target = 1
        prediction = p_1 > 0.5                          # The prediction thresholded
        xent = -y * at.log(p_1) - (1-y) * at.log(1-p_1) # Cross-entropy log-loss function
        cost = xent.mean() + 0.01 * (w ** 2).sum()      # The cost to minimize (cross-entropy plus an L2 weight penalty)
        gw, gb = at.grad(cost, [w, b])                  # Compute the gradient of the cost
                                                        # w.r.t. the weight vector w and
                                                        # the bias term b

        # Compile
        self._train = aesara.function(
                inputs=[x, y],
                outputs=[prediction, xent],
                updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)))
        self.predict = aesara.function(inputs=[x], outputs=prediction)

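        # Train the model once now, then retrain every Sunday at 8:00 AM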
        self.train(self.my_training_method)
        self.train(self.date_rules.every(DayOfWeek.SUNDAY), self.time_rules.at(8,0), self.my_training_method)

    def get_features_and_labels(self, n_steps=5):
        training_df = self.pandas_converter.get_data_frame[TradeBar](list(self.training_data)[::-1])['close']

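        # Build lagged-close features: column "close-i" holds the close price from i days earlier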
        features = []
        for i in range(1, n_steps + 1):
            close = training_df.shift(i)[n_steps:-1]
            close.name = f"close-{i}"
            features.append(close)
        features = pd.concat(features, axis=1)
        # Normalize using the 5 day interval
        features = MinMaxScaler().fit_transform(features.T).T[4:]
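        # Drop the first 4 rows so the feature rows align with the label series built below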
        
        Y = training_df.pct_change().shift(-1)[n_steps*2-1:-1].reset_index(drop=True)
        labels = np.array([1 if y > 0 else 0 for y in Y])   # binary class

        return features, labels

    def my_training_method(self):
        features, labels = self.get_features_and_labels()
        self._train(features, labels)

        model_key = "model_test_aesara"
        file_name = self.object_store.get_file_path(model_key)
        joblib.dump(self.predict, file_name)
        self.object_store.save(model_key)

    def on_data(self, slice):
        if self.symbol in slice.bars:
            self.training_data.add(slice.bars[self.symbol])

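        # Predict the direction of the next close-to-close return from the latest feature row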
        features, _ = self.get_features_and_labels()
        prediction = self.predict(features[-1].reshape(1, -1))
        prediction = float(prediction[0])

        if prediction == 1:
            self.set_holdings(self.symbol, 1)
        elif prediction == 0:            
            self.set_holdings(self.symbol, -1)