I've been building this machine learning algorithm with TensorFlow, but the backtest fails to complete.

There were no other errors during the backtest; this message popped up right before it was about to complete. It doesn't give me anything useful to debug with, so I'm posting here to see if anyone has run into a similar issue and managed to solve it.

[ERROR] FATAL UNHANDLED EXCEPTION:
2022-09-29 12:46:30.266444: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-09-29 12:46:30.346840: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory
2022-09-29 12:46:30.346862: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2022-09-29 12:46:30.362853: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2022-09-29 12:46:30.863820: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory
2022-09-29 12:46:30.863884: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory
2022-09-29 12:46:30.863891: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.
2022-09-29 12:48:15.483565: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory
2022-09-29 12:48:15.483585: W tensorflow/stream_executor/cuda/cuda_driver.cc:263] failed call to cuInit: UNKNOWN ERROR (303)
2022-09-29 12:48:15.483601: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (BACKTESTING-78-042b3f9e0ed542251fac26be0691434b): /proc/driver/nvidia/version does not exist
2022-09-29 12:48:15.483791: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
runLauncher.sh: line 15:     7 Killed                  dotnet QuantConnect.Lean.Launcher.dll --data-folder /Data --config /QuantConnect/backtesting/airlock/config.json --results-destination-folder /QuantConnect/backtesting/airlock/

 

Here's the machine learning code that I'm using:

from AlgorithmImports import *

from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras import Sequential, metrics
import numpy as np
import tensorflow.keras as keras
from math import isnan

FEATURE_NUMBER = 16
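# Return, volume, and momentum-indicator columns used as model inputs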
FEATURES = ['_1d_rtn', '_3d_rtn', '_5d_rtn', '_10d_rtn', '_20d_rtn', '_60d_rtn', '_1d_volume', '_3d_volume', '_5d_volume', '_10d_volume', '_20d_volume', '_60d_volume', '_macd', '_macd_histo', '_rsi', '_awesome_oscillator',]
LOGNORMAL_FEATURE = ['_rsi', '_macd_histo']

class MyNNModel:
    def __init__(self, algorithm):
        self.model = None
        self.__CreateModel()
        self.algorithm = algorithm

    def __CreateModel(self):
        '''Creates the neural network model'''
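        # Custom precision/recall/F1 helpers built from Keras backend ops (currently unused because the metrics list in compile() is commented out)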
        def recall_m(y_true, y_pred):
            true_positives = keras.backend.sum(keras.backend.round(keras.backend.clip(y_true * y_pred, 0, 1)))
            possible_positives = keras.backend.sum(keras.backend.round(keras.backend.clip(y_true, 0, 1)))
            recall = true_positives / (possible_positives + keras.backend.epsilon())
            return recall

        def precision_m(y_true, y_pred):
            true_positives = keras.backend.sum(keras.backend.round(keras.backend.clip(y_true * y_pred, 0, 1)))
            predicted_positives = keras.backend.sum(keras.backend.round(keras.backend.clip(y_pred, 0, 1)))
            precision = true_positives / (predicted_positives + keras.backend.epsilon())
            return precision

        def f1_score_func(y_true, y_pred):
            precision = precision_m(y_true, y_pred)
            recall = recall_m(y_true, y_pred)
            return 2*((precision*recall)/(precision+recall+keras.backend.epsilon()))

        self.model = Sequential()
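        # Two 256-unit ReLU hidden layers with 10% dropout each, and a single sigmoid output for the binary label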
        self.model.add(Dense(256, activation="relu", input_shape=(FEATURE_NUMBER, ), name="dense_1"))
        self.model.add(Dropout(0.1))
        self.model.add(Dense(256, activation="relu", name="dense_2"))
        self.model.add(Dropout(0.1))
        self.model.add(Dense(1, activation="sigmoid", name="predictions"))

        # optimizer = Adam(learning_rate=0.01)

        # Compile model
        self.model.compile(
            optimizer='Adam',
            loss='binary_crossentropy',
            # metrics=[
            #     metrics.AUC(),
            #     metrics.Precision(name='precision'),
            #     metrics.Recall(name='recall'),
            #     metrics.BinaryAccuracy(),
            #     f1_score_func,
            # ],
        )

    def __PrepareData(self, data):
        '''Prepares the data for a format friendly for our model'''
        data_tmp = data.dropna()

        X_train = data_tmp.loc[:, FEATURES]
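        # Binary label: 1 when the forward trade return exceeds 0.05, else 0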
        y_train = (data_tmp.loc[:, '_y_trade_rtn'] > 0.05).astype(int)

        # Winsorization
        X_train = self.__WinsorizeCustom(X_train, FEATURES)

        # Log transform
        X_train = self.__LogCustom(X_train, LOGNORMAL_FEATURE)

        return X_train, y_train

    def __WinsorizeCustom(self, df, cols: list):
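        '''Clips each column to its 5th and 95th percentiles to limit the influence of outliers'''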
        for col in cols:
            quantiles = df.loc[:, col].quantile([0.05, 0.95])
            q_05 = quantiles.loc[0.05]
            q_95 = quantiles.loc[0.95]

            df.loc[:, col] = np.where(
                df.loc[:, col].values <= q_05,
                q_05,
                np.where(df.loc[:, col].values >= q_95, q_95, df.loc[:, col].values)
            )
        return df

    def __LogCustom(self, df, cols: list):
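        '''Applies a natural log transform to the given columns'''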
        for col in cols:
            df.loc[:, col] = np.log(df.loc[:, col])
        return df

    def Train(self, data):
        '''Trains the model'''
        X_train, y_train = self.__PrepareData(data)
        self.model.fit(X_train, y_train, epochs=10,)

    def Predict(self, input_data, symbol):
        '''Makes a prediction on the direction of the future stock price'''
        if not self.model:
            return False

        if input_data.isna().values.any():
            return False

        predict = self.model.predict(input_data)

        # if isnan(predict[0][0]):
        #     self.algorithm.Debug(f'{self.algorithm.Time} [None]: [{symbol}]')
        #     self.algorithm.Debug(f'{input_data.iloc[:, :4]}')
        #     self.algorithm.Debug(f'{input_data.iloc[:, 4:8]}')
        #     self.algorithm.Debug(f'{input_data.iloc[:, 8:12]}')
        #     self.algorithm.Debug(f'{input_data.iloc[:, 12:]}')
        #     self.algorithm.Debug(f'Predict result: {predict} {type(predict)}')
        #     self.algorithm.Debug(f'Predict result: {predict[0]} {type(predict[0])}')
        #     self.algorithm.Debug(f'Predict result: {predict[0][0]} {type(predict[0][0])}')
        #     self.algorithm.Debug(f'{isnan(predict)}, {isnan(predict[0])}, {isnan(predict[0][0])}')
        #     return 0
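        # Round the sigmoid output to a 0/1 direction signal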
        return round(predict[0][0])
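
In case it helps anyone reproduce the model outside of LEAN, here is a minimal sketch that drives MyNNModel with synthetic data (drop the AlgorithmImports line when running locally; the numbers are random and only the column names match my features, so treat it as a smoke test rather than my actual pipeline):

import numpy as np
import pandas as pd

# Synthetic frame with the 16 feature columns plus the forward-return column used for the label
rng = np.random.default_rng(0)
n = 500
data = pd.DataFrame(rng.normal(size=(n, FEATURE_NUMBER)), columns=FEATURES)
data['_rsi'] = rng.uniform(1.0, 100.0, n)        # kept positive because this column is log-transformed
data['_macd_histo'] = rng.uniform(0.01, 1.0, n)  # kept positive because this column is log-transformed
data['_y_trade_rtn'] = rng.normal(0.02, 0.1, n)

model = MyNNModel(algorithm=None)  # the algorithm reference is only used by the commented-out Debug calls
model.Train(data)
print(model.Predict(data.loc[:, FEATURES].head(1), symbol='TEST'))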
