Overall Statistics
Total Trades
7769
Average Win
0.23%
Average Loss
-0.14%
Compounding Annual Return
214.523%
Drawdown
51.900%
Expectancy
0.301
Net Profit
422.980%
Sharpe Ratio
2.802
Probabilistic Sharpe Ratio
82.302%
Loss Rate
50%
Win Rate
50%
Profit-Loss Ratio
1.62
Alpha
0.927
Beta
0.656
Annual Standard Deviation
0.544
Annual Variance
0.296
Information Ratio
1.446
Tracking Error
0.425
Treynor Ratio
2.323
Total Fees
$584617.03
import torch
from ddpg_agent import Agent
import numpy as np
import pandas as pd
import pickle
from io import BytesIO
from collections import deque
import base64
        
class NadionTransdimensionalAtmosphericScrubbers(QCAlgorithm):
    """DDPG-driven crypto portfolio allocator for QuantConnect.

    A pretrained actor network maps a multi-timeframe price/volume state
    (24 channels x 64 steps) to a target portfolio weight per symbol.
    Holdings are rebalanced on 15-minute bars whenever the target weight
    drifts at least 5 percentage points from the current weight.
    """

    # Consolidation timeframes in feature order.  The 1-based position of
    # each entry is also the divisor applied to that timeframe's scaled
    # log-returns in Features().
    TIMEFRAMES = ("15min", "30min", "1hr", "2hr", "4hr", "8hr", "16hr", "32hr")

    def Initialize(self):
        """Set up the agent, checkpoints, subscriptions, consolidators and windows."""
        self.window = 64    # time steps per feature channel
        self.channels = 24  # 8 timeframes x (returns, close z-score, volume z-score)
        self.SetCash(100000)

        self.agent = Agent(num_channels = self.channels, state_size=self.window, action_size=1, random_seed=101)

        # Pretrained actor/critic checkpoints, stored base64-encoded.
        act_b64_str = self.Download("https://www.dropbox.com/s/70iodbiktu4ewaw/b64_actor_ep836_checkpoint.pth?dl=1")
        cri_b64_str = self.Download("https://www.dropbox.com/s/z84uwl2gmb9kpvl/b64_critic_ep836_checkpoint.pth?dl=1")

        # Base64 text -> raw checkpoint bytes.
        act_d = base64.b64decode(act_b64_str.encode("UTF-8"))
        cri_d = base64.b64decode(cri_b64_str.encode("UTF-8"))

        # map_location keeps tensors on CPU regardless of where they were saved.
        self.agent.actor_local.load_state_dict(torch.load(BytesIO(act_d), map_location=lambda storage, loc: storage))
        self.agent.critic_local.load_state_dict(torch.load(BytesIO(cri_d), map_location=lambda storage, loc: storage))

        self.symbolList = ["BTCUSD","ETHUSD","LTCUSD","BCHUSD"]
        self.rollingWindow = {}
        self.weights = {}
        # Exchange minimum order sizes (currently not used by the trading logic).
        self.min_oder = {"BTCUSD":0.01,"ETHUSD":0.01,"LTCUSD":0.1,"XRPUSD":1,"BCHUSD":0.01,"EOSUSD":0.1}

        # Bar period and event handler for each timeframe label.
        periods = {
            "15min": timedelta(minutes=15),
            "30min": timedelta(minutes=30),
            "1hr": timedelta(hours=1),
            "2hr": timedelta(hours=2),
            "4hr": timedelta(hours=4),
            "8hr": timedelta(hours=8),
            "16hr": timedelta(hours=16),
            "32hr": timedelta(hours=32),
        }
        handlers = {
            "15min": self.FifteenConsolidator,
            "30min": self.ThirtyConsolidator,
            "1hr": self.OneHourConsolidator,
            "2hr": self.TwoHourConsolidator,
            "4hr": self.FourHourConsolidator,
            "8hr": self.EightHourConsolidator,
            "16hr": self.SixteenHourConsolidator,
            "32hr": self.ThirdytwoHourConsolidator,
        }

        # One extra bar so np.diff of the closes yields exactly `window` returns.
        length = self.window + 1
        for name in self.symbolList:
            self.AddCrypto(name, Resolution.Minute, Market.GDAX)
            # https://www.quantconnect.com/tutorials/api-tutorials/consolidating-data-to-build-bars
            for tf in self.TIMEFRAMES:
                self.rollingWindow["close_{0}_{1}".format(tf, name)] = deque(maxlen=length)
                self.rollingWindow["volume_{0}_{1}".format(tf, name)] = deque(maxlen=length)
                consolidator = TradeBarConsolidator(periods[tf])
                consolidator.DataConsolidated += handlers[tf]
                self.SubscriptionManager.AddConsolidator(name, consolidator)

        self.SetBrokerageModel(BrokerageName.GDAX, AccountType.Cash)
        self.SetStartDate(2019, 1, 1)
        self.SetBenchmark("BTCUSD")
        # Warm-up (in minutes) long enough to fill the slowest rolling window.
        self.SetWarmUp(int((self.window+1)*64*60))

    def std(self, x):
        """Z-score normalize array x; a constant window yields NaNs, zeroed later."""
        y = (x - x.mean()) / x.std()
        return y

    def Features(self, symbol):
        """Build the (1, channels, window) state array for `symbol`.

        Each timeframe contributes three channels, in TIMEFRAMES order:
        scaled log-returns, z-scored closes and z-scored volumes.  Windows
        are filled with appendleft, so index 0 is the newest bar — the
        model was trained on this orientation.  NaN/inf values (constant
        windows, zero volume) are replaced with 0.
        """
        parts = []
        for i, tf in enumerate(self.TIMEFRAMES, start=1):
            closes = np.array(self.rollingWindow["close_{0}_{1}".format(tf, symbol)])
            volumes = np.array(self.rollingWindow["volume_{0}_{1}".format(tf, symbol)])
            # Log-returns scaled by 0.008 and the timeframe's 1-based index.
            parts.append(np.diff(np.log(closes)) / 0.008 / i)
            parts.append(self.std(closes[:-1]))
            parts.append(self.std(volumes[:-1]))

        obs = np.concatenate(parts, axis=0)
        # One pass covers NaN, +inf and -inf.
        obs[~np.isfinite(obs)] = 0

        return obs.reshape(1, self.channels, self.window)

    def normalize(self,x):
        """Map an action in [-1, 1] to a portfolio weight in [0, 0.25]."""
        return np.round((x+1)/2*0.25,2)

    def _record(self, tf, bar):
        """Append the bar's close and volume to the timeframe's rolling windows."""
        self.rollingWindow["close_{0}_{1}".format(tf, bar.Symbol)].appendleft(bar.Close)
        self.rollingWindow["volume_{0}_{1}".format(tf, bar.Symbol)].appendleft(bar.Volume)

    def FifteenConsolidator(self, sender, bar):
        """15-minute handler: record the bar, then query the agent and rebalance."""
        self._record("15min", bar)

        symbol = str(bar.Symbol)
        Close = bar.Close

        # Trade only once warmed up and the slowest (32hr) window has data;
        # the warm-up period ensures all windows are actually full here.
        if not self.IsWarmingUp and len(self.rollingWindow["close_32hr_{0}".format(bar.Symbol)]) >= 33:

            # Current portfolio weight of this symbol (0 when flat).
            if self.Securities[symbol].Invested:
                currentweight = (self.Portfolio[symbol].Quantity * Close) /self.Portfolio.TotalPortfolioValue
            else:
                currentweight = 0.0

            self.weights[symbol] = currentweight

            state = self.Features(symbol)
            action = self.agent.act(state)
            weight = round(float(self.normalize(action[0])),2)

            self.Debug("sym {} weight {} current {}".format(symbol, weight, currentweight))

            # Rebalance only when the target moved at least 5 percentage points
            # from the current weight, or the agent requests a full exit.
            if abs(weight - currentweight) >= 0.05 or weight == 0:
                self.SetHoldings(symbol, weight)

    def ThirtyConsolidator(self, sender, bar):
        """30-minute bar handler."""
        self._record("30min", bar)

    def OneHourConsolidator(self, sender, bar):
        """1-hour bar handler."""
        self._record("1hr", bar)

    def TwoHourConsolidator(self, sender, bar):
        """2-hour bar handler."""
        self._record("2hr", bar)

    def FourHourConsolidator(self, sender, bar):
        """4-hour bar handler."""
        self._record("4hr", bar)

    def EightHourConsolidator(self, sender, bar):
        """8-hour bar handler."""
        self._record("8hr", bar)

    def SixteenHourConsolidator(self, sender, bar):
        """16-hour bar handler."""
        self._record("16hr", bar)

    def ThirdytwoHourConsolidator(self, sender, bar):
        """32-hour bar handler."""
        self._record("32hr", bar)

    def OnData(self, data):
        """Unused; all trading is driven by the consolidator events."""
        pass

    def OnOrderEvent(self, orderEvent):
        """Log every order event for debugging."""
        self.Debug("{} {}".format(self.Time, orderEvent.ToString()))

    def OnEndOfAlgorithm(self):
        """Log final portfolio value and cash book."""
        self.Log("{} - TotalPortfolioValue: {}".format(self.Time, self.Portfolio.TotalPortfolioValue))
        self.Log("{} - CashBook: {}".format(self.Time, self.Portfolio.CashBook))
import numpy as np
from model import Actor, Critic
import torch
import torch.optim as optim

# L2 penalty for the critic optimizer (no optimizer is built in this
# inference-only agent, so this is currently unused).
WEIGHT_DECAY = 0.000
# Inference runs on CPU inside the backtester.
device = torch.device("cpu")

class Agent():
    """Inference-only DDPG agent: holds actor/critic networks and exposes act()."""

    def __init__(self, num_channels, state_size, action_size, random_seed):
        """Create local and target actor/critic networks.

        Args:
            num_channels: input feature channels of the state tensor
            state_size: time steps per channel
            action_size: dimension of the action vector
            random_seed: seed forwarded to the network constructors
        """
        self.state_size = state_size
        self.action_size = action_size
        # BUG FIX: was hard-coded to 0, silently ignoring the random_seed
        # argument; store the value actually passed in.
        self.seed = random_seed

        # Actor Network (w/ Target Network)
        self.actor_local = Actor(num_channels, state_size, action_size, random_seed).to(device)
        self.actor_target = Actor(num_channels, state_size, action_size, random_seed).to(device)

        # Critic Network (w/ Target Network)
        self.critic_local = Critic(num_channels, state_size, action_size, random_seed).to(device)
        self.critic_target = Critic(num_channels, state_size, action_size, random_seed).to(device)

    def act(self, state):
        """Returns actions for given state as per current policy, clipped to [-1, 1]."""
        state = torch.from_numpy(state).float().to(device)
        self.actor_local.eval()
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        return np.clip(action, -1, 1)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

def hidden_init(layer):
    """Return the symmetric uniform init range (-1/sqrt(fan), +1/sqrt(fan)).

    NOTE(review): `fan` is taken from weight dim 0, which for nn.Linear is
    the *output* dimension rather than the classic fan-in — kept as-is so
    initialization matches the trained checkpoints.
    """
    fan = layer.weight.data.size(0)
    bound = 1.0 / np.sqrt(fan)
    return (-bound, bound)

class Actor(nn.Module):
    """Actor (Policy) Model: 1-D conv feature extractor feeding two FC layers."""

    def __init__(self, num_channels, state_size, action_size, seed, conv1=100, conv2=100, fc1_units=200, kernel_size=10, stride=1):
        """Build the layers and initialize their parameters.

        Args:
            num_channels: input channels of the state tensor
            state_size: time steps per channel
            action_size: output action dimension
            seed: torch manual seed for reproducible initialization
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)

        # Layer creation order matters for RNG reproducibility — do not reorder.
        self.conv1 = nn.Conv1d(num_channels, conv1, kernel_size, stride)
        self.conv2 = nn.Conv1d(conv1, conv2, kernel_size, stride)
        self.poolAvg = nn.AvgPool1d(kernel_size=3)
        self.dropout = nn.Dropout(0.25)  # defined but never applied in forward()

        # Width after one valid convolution (before the /3 average pooling).
        def out_width(width):
            return (width - (kernel_size - 1) - 1) // stride + 1

        width = out_width(out_width(state_size) // 3) // 3
        # Flattened size feeding fc1: final width x last conv's channel count.
        self.linear_input_size = width * conv2

        self.fc1 = nn.Linear(self.linear_input_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, action_size)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init for hidden layers; small uniform for the output layer."""
        for layer in (self.conv1, self.conv2, self.fc1):
            layer.weight.data.uniform_(*hidden_init(layer))
        self.fc2.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Map a batch of states to tanh-bounded actions."""
        out = self.poolAvg(F.relu(self.conv1(state)))
        out = self.poolAvg(F.relu(self.conv2(out)))
        out = F.relu(self.fc1(out.view(-1, self.linear_input_size)))
        return torch.tanh(self.fc2(out))

class Critic(nn.Module):
    """Critic (Value) Model: conv state encoder + action concatenated into FC head."""

    def __init__(self, num_channels, state_size, action_size, seed, conv1=100, conv2=100, fcs1_units=200, kernel_size=10, stride=1):
        """Build the layers and initialize their parameters.

        Args:
            num_channels: input channels of the state tensor
            state_size: time steps per channel
            action_size: dimension of the action concatenated into the head
            seed: torch manual seed for reproducible initialization
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)

        # Layer creation order matters for RNG reproducibility — do not reorder.
        self.conv1 = nn.Conv1d(num_channels, conv1, kernel_size, stride)
        self.conv2 = nn.Conv1d(conv1, conv2, kernel_size, stride)
        self.poolAvg = nn.AvgPool1d(kernel_size=3)
        self.dropout = nn.Dropout(0.25)  # defined but never applied in forward()

        # Width after one valid convolution (before the /3 average pooling).
        def out_width(width):
            return (width - (kernel_size - 1) - 1) // stride + 1

        width = out_width(out_width(state_size) // 3) // 3
        # Flattened size of the conv features: final width x last conv's channels.
        self.linear_input_size = width * conv2

        # The action vector is appended to the flattened state features.
        self.fcs1 = nn.Linear(self.linear_input_size + action_size, fcs1_units)
        self.fc2 = nn.Linear(fcs1_units, 1)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init for hidden layers; small uniform for the output layer."""
        for layer in (self.conv1, self.conv2, self.fcs1):
            layer.weight.data.uniform_(*hidden_init(layer))
        self.fc2.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Map a (state, action) batch to scalar Q-values."""
        out = self.poolAvg(F.relu(self.conv1(state)))
        out = self.poolAvg(F.relu(self.conv2(out)))
        features = out.view(-1, self.linear_input_size)
        joined = torch.cat([features, action], dim=1)
        return self.fc2(F.relu(self.fcs1(joined)))