| Overall Statistics | |
| --- | --- |
| Total Trades | 179 |
| Average Win | 7.77% |
| Average Loss | -3.10% |
| Compounding Annual Return | 69.426% |
| Drawdown | 27.400% |
| Expectancy | 1.085 |
| Net Profit | 974.543% |
| Sharpe Ratio | 1.709 |
| Loss Rate | 41% |
| Win Rate | 59% |
| Profit-Loss Ratio | 2.51 |
| Alpha | 0.565 |
| Beta | 0.115 |
| Annual Standard Deviation | 0.329 |
| Annual Variance | 0.108 |
| Information Ratio | 1.558 |
| Tracking Error | 0.381 |
| Treynor Ratio | 4.88 |
| Total Fees | $0.00 |
// Accord Machine Learning Library
// The Accord.NET Framework
// http://accord-framework.net
//
// AForge Machine Learning Library
// AForge.NET framework
//
// Copyright © Andrew Kirillov, 2007-2008
// andrew.kirillov@gmail.com
//
// Copyright © César Souza, 2009-2017
// cesarsouza at gmail.com
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
namespace QuantConnect.Algorithm.CSharp
{
using Newtonsoft.Json;
using System;
using System.Linq;
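/// <summary>
/// Q-learning variant whose update step behaves as if the plain update were repeated 1/p times,
/// where p is the chosen action's share of the state's total Q-value: the effective learning rate
/// is 1 - (1 - alpha)^(1/p), so actions holding a small share of the Q-mass receive larger steps.
/// </summary>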
public class RepeatingQLearning : QLearning
{
public RepeatingQLearning(int states, int actions, IExplorationPolicy explorationPolicy)
: this(states, actions, explorationPolicy, true)
{
}
public RepeatingQLearning(int states, int actions, IExplorationPolicy explorationPolicy, bool randomize)
: base(states, actions, explorationPolicy, randomize)
{
}
protected override double GetUpdateStep(int state, int action)
{
double[] actionEstimations = qvalues[state];
double sum = 0;
for (int i = 0; i < actionEstimations.Length; i++)
{
sum += actionEstimations[i];
}
double actionProbability = actionEstimations[action] / sum;
double exponent = 1 / actionProbability;
return 1 - Math.Pow(1 - learningRate, exponent);
}
}
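/// <summary>
/// Dyna-Q: tabular Q-learning extended with a simple learned model, consisting of an exponentially
/// smoothed reward estimate per state-action pair and transition counts per (state, action, next state).
/// <see cref="Hallucinate(int, int, double, int, int)"/> records a real transition into the model and then
/// replays simulated transitions drawn from it through <see cref="QLearning.UpdateState"/>.
/// </summary>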
public class DynaQ : QLearning
{
private readonly Random _rng = new Random();
[JsonProperty]
private readonly double[][] rewards;
[JsonProperty]
private readonly int[][][] stateActionCount;
public DynaQ(int states, int actions, IExplorationPolicy explorationPolicy)
: this(states, actions, explorationPolicy, true)
{
}
public DynaQ(int states, int actions, IExplorationPolicy explorationPolicy, bool randomize)
: base(states, actions, explorationPolicy, randomize)
{
rewards = new double[states][];
stateActionCount = new int[states][][];
for (int i = 0; i < states; i++)
{
rewards[i] = new double[actions];
stateActionCount[i] = new int[actions][];
for (int j = 0; j < actions; ++j)
{
stateActionCount[i][j] = new int[states];
}
}
}
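/// <summary>
/// Records a real transition into the model (increments the transition count and exponentially
/// smooths the reward estimate with <see cref="QLearning.LearningRate"/>), then performs
/// <paramref name="numHallucinations"/> simulated planning updates. The real transition itself
/// is not applied to the Q-table here; call <see cref="QLearning.UpdateState"/> for that.
/// </summary>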
public void Hallucinate(int previousState, int action, double reward, int nextState, int numHallucinations)
{
stateActionCount[previousState][action][nextState] += 1;
rewards[previousState][action] = (1 - LearningRate) * rewards[previousState][action] + LearningRate * reward;
for (int k = 0; k < numHallucinations; ++k)
{
Hallucinate();
}
}
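// Planning step: pick a random state-action pair, take its modelled reward, sample a next state in
// proportion to the recorded transition counts (or uniformly if the pair has never been observed),
// and apply an ordinary Q-learning update to the simulated transition.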
private void Hallucinate()
{
int pastState = _rng.Next(StatesCount);
int action = _rng.Next(ActionsCount);
double reward = rewards[pastState][action];
var transitions = stateActionCount[pastState][action];
int countSum = transitions.Sum();
int nextState;
if (countSum > 0)
{
nextState = 0;
var r = _rng.Next(countSum);
for (int n = transitions.Length - 1; nextState < n; ++nextState)
{
var count = transitions[nextState];
if (r < count)
break;
r -= count;
}
}
else
{
nextState = _rng.Next(StatesCount);
}
UpdateState(pastState, action, reward, nextState);
}
}
/// <summary>
/// QLearning learning algorithm.
/// </summary>
///
/// <remarks>The class provides implementation of Q-Learning algorithm, known as
/// off-policy Temporal Difference control.</remarks>
///
/// <seealso cref="Sarsa"/>
///
public class QLearning
{
// amount of possible states
[JsonProperty]
protected int states;
// amount of possible actions
[JsonProperty]
protected int actions;
// q-values
[JsonProperty]
protected double[][] qvalues;
// exploration policy
protected IExplorationPolicy explorationPolicy;
// discount factor
[JsonProperty]
protected double discountFactor = 0.95;
// learning rate
[JsonProperty]
protected double learningRate = 0.25;
/// <summary>
/// Amount of possible states.
/// </summary>
///
public int StatesCount
{
get { return states; }
}
/// <summary>
/// Amount of possible actions.
/// </summary>
///
public int ActionsCount
{
get { return actions; }
}
/// <summary>
/// Exploration policy.
/// </summary>
///
/// <remarks>Policy, which is used to select actions.</remarks>
///
public IExplorationPolicy ExplorationPolicy
{
get { return explorationPolicy; }
set { explorationPolicy = value; }
}
/// <summary>
/// Learning rate, [0, 1].
/// </summary>
///
/// <remarks>The value determines the amount of updates Q-function receives
/// during learning. The greater the value, the more updates the function receives.
/// The lower the value, the less updates it receives.</remarks>
///
public double LearningRate
{
get { return learningRate; }
set
{
if (value < 0 || value > 1.0)
throw new ArgumentOutOfRangeException("Argument should be between 0 and 1.");
learningRate = value;
}
}
/// <summary>
/// Discount factor, [0, 1].
/// </summary>
///
/// <remarks>Discount factor for the expected summary reward. The value serves as
/// multiplier for the expected reward. So if the value is set to 1,
/// then the expected summary reward is not discounted. If the value is getting
/// smaller, then smaller amount of the expected reward is used for actions'
/// estimates update.</remarks>
///
public double DiscountFactor
{
get { return discountFactor; }
set
{
if (value < 0 || value > 1.0)
throw new ArgumentOutOfRangeException("Discount factor should be between 0 and 1.");
discountFactor = value;
}
}
/// <summary>
/// Initializes a new instance of the <see cref="QLearning"/> class.
/// </summary>
///
/// <param name="states">Amount of possible states.</param>
/// <param name="actions">Amount of possible actions.</param>
/// <param name="explorationPolicy">Exploration policy.</param>
///
/// <remarks>Action estimates are randomized in the case of this constructor
/// is used.</remarks>
///
public QLearning(int states, int actions, IExplorationPolicy explorationPolicy) :
this(states, actions, explorationPolicy, true)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="QLearning"/> class.
/// </summary>
///
/// <param name="states">Amount of possible states.</param>
/// <param name="actions">Amount of possible actions.</param>
/// <param name="explorationPolicy">Exploration policy.</param>
/// <param name="randomize">Randomize action estimates or not.</param>
///
/// <remarks>The <b>randomize</b> parameter specifies if initial action estimates should be randomized
/// with small values or not. Randomization of action values may be useful, when greedy exploration
/// policies are used. In this case randomization ensures that actions of the same type are not chosen always.</remarks>
///
public QLearning(int states, int actions, IExplorationPolicy explorationPolicy, bool randomize)
{
this.states = states;
this.actions = actions;
this.explorationPolicy = explorationPolicy;
// create Q-array
qvalues = new double[states][];
for (int i = 0; i < states; i++)
{
qvalues[i] = new double[actions];
}
// do randomization
if (randomize)
{
Random rand = new Random();
for (int i = 0; i < states; i++)
{
for (int j = 0; j < actions; j++)
{
qvalues[i][j] = rand.NextDouble() / 10;
}
}
}
}
/// <summary>
/// Get next action from the specified state.
/// </summary>
///
/// <param name="state">Current state to get an action for.</param>
///
/// <returns>Returns the action for the state.</returns>
///
/// <remarks>The method returns an action according to current
/// <see cref="ExplorationPolicy">exploration policy</see>.</remarks>
///
public int GetAction(int state)
{
return explorationPolicy.ChooseAction(qvalues[state]);
}
protected virtual double GetUpdateStep(int state, int action)
{
return learningRate;
}
/// <summary>
/// Update Q-function's value for the previous state-action pair.
/// </summary>
///
/// <param name="previousState">Previous state.</param>
/// <param name="action">Action, which leads from previous to the next state.</param>
/// <param name="reward">Reward value, received by taking specified action from previous state.</param>
/// <param name="nextState">Next state.</param>
///
public void UpdateState(int previousState, int action, double reward, int nextState)
{
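// Q-learning backup: Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * (reward + gamma * max_b Q(s',b)),
// where alpha comes from GetUpdateStep and gamma is the discount factor.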
// next state's action estimations
double[] nextActionEstimations = qvalues[nextState];
// find maximum expected summary reward from the next state
double maxNextExpectedReward = nextActionEstimations[0];
for (int i = 1; i < actions; i++)
{
if (nextActionEstimations[i] > maxNextExpectedReward)
maxNextExpectedReward = nextActionEstimations[i];
}
// previous state's action estimations
double[] previousActionEstimations = qvalues[previousState];
// update expected summary reward of the previous state
double alpha = GetUpdateStep(previousState, action);
previousActionEstimations[action] *= (1.0 - alpha);
previousActionEstimations[action] += (alpha * (reward + discountFactor * maxNextExpectedReward));
}
}
/// <summary>
/// Sarsa learning algorithm.
/// </summary>
///
/// <remarks>The class provides implementation of Sarsa algorithm, known as
/// on-policy Temporal Difference control.</remarks>
///
/// <seealso cref="QLearning"/>
///
public class Sarsa
{
// amount of possible states
private int states;
// amount of possible actions
private int actions;
// q-values
private double[][] qvalues;
// exploration policy
private IExplorationPolicy explorationPolicy;
// discount factor
private double discountFactor = 0.95;
// learning rate
private double learningRate = 0.25;
/// <summary>
/// Amount of possible states.
/// </summary>
///
public int StatesCount
{
get { return states; }
}
/// <summary>
/// Amount of possible actions.
/// </summary>
///
public int ActionsCount
{
get { return actions; }
}
/// <summary>
/// Exploration policy.
/// </summary>
///
/// <remarks>Policy, which is used to select actions.</remarks>
///
public IExplorationPolicy ExplorationPolicy
{
get { return explorationPolicy; }
set { explorationPolicy = value; }
}
/// <summary>
/// Learning rate, [0, 1].
/// </summary>
///
/// <remarks>The value determines the amount of updates Q-function receives
/// during learning. The greater the value, the more updates the function receives.
/// The lower the value, the less updates it receives.</remarks>
///
public double LearningRate
{
get { return learningRate; }
set
{
if (value < 0 || value > 1)
throw new ArgumentOutOfRangeException("Learning rate must be between 0 and 1.");
learningRate = value;
}
}
/// <summary>
/// Discount factor, [0, 1].
/// </summary>
///
/// <remarks>Discount factor for the expected summary reward. The value serves as
/// multiplier for the expected reward. So if the value is set to 1,
/// then the expected summary reward is not discounted. If the value is getting
/// smaller, then smaller amount of the expected reward is used for actions'
/// estimates update.</remarks>
///
public double DiscountFactor
{
get { return discountFactor; }
set
{
if (value < 0 || value > 1)
throw new ArgumentOutOfRangeException("Discount factor must be between 0 and 1.");
discountFactor = value;
}
}
/// <summary>
/// Initializes a new instance of the <see cref="Sarsa"/> class.
/// </summary>
///
/// <param name="states">Amount of possible states.</param>
/// <param name="actions">Amount of possible actions.</param>
/// <param name="explorationPolicy">Exploration policy.</param>
///
/// <remarks>Action estimates are randomized in the case of this constructor
/// is used.</remarks>
///
public Sarsa(int states, int actions, IExplorationPolicy explorationPolicy) :
this(states, actions, explorationPolicy, true)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="Sarsa"/> class.
/// </summary>
///
/// <param name="states">Amount of possible states.</param>
/// <param name="actions">Amount of possible actions.</param>
/// <param name="explorationPolicy">Exploration policy.</param>
/// <param name="randomize">Randomize action estimates or not.</param>
///
/// <remarks>The <b>randomize</b> parameter specifies if initial action estimates should be randomized
/// with small values or not. Randomization of action values may be useful, when greedy exploration
/// policies are used. In this case randomization ensures that actions of the same type are not chosen always.</remarks>
///
public Sarsa(int states, int actions, IExplorationPolicy explorationPolicy, bool randomize)
{
this.states = states;
this.actions = actions;
this.explorationPolicy = explorationPolicy;
// create Q-array
qvalues = new double[states][];
for (int i = 0; i < states; i++)
{
qvalues[i] = new double[actions];
}
// do randomization
if (randomize)
{
Random rand = new Random();
for (int i = 0; i < states; i++)
{
for (int j = 0; j < actions; j++)
{
qvalues[i][j] = rand.NextDouble() / 10;
}
}
}
}
/// <summary>
/// Get next action from the specified state.
/// </summary>
///
/// <param name="state">Current state to get an action for.</param>
///
/// <returns>Returns the action for the state.</returns>
///
/// <remarks>The method returns an action according to current
/// <see cref="ExplorationPolicy">exploration policy</see>.</remarks>
///
public int GetAction(int state)
{
return explorationPolicy.ChooseAction(qvalues[state]);
}
/// <summary>
/// Update Q-function's value for the previous state-action pair.
/// </summary>
///
/// <param name="previousState">Curren state.</param>
/// <param name="previousAction">Action, which lead from previous to the next state.</param>
/// <param name="reward">Reward value, received by taking specified action from previous state.</param>
/// <param name="nextState">Next state.</param>
/// <param name="nextAction">Next action.</param>
///
/// <remarks>Updates Q-function's value for the previous state-action pair in
/// the case if the next state is non terminal.</remarks>
///
public void UpdateState(int previousState, int previousAction, double reward, int nextState, int nextAction)
{
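// Sarsa backup: Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * (reward + gamma * Q(s',a')).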
// previous state's action estimations
double[] previousActionEstimations = qvalues[previousState];
// update expected summary reward of the previous state
previousActionEstimations[previousAction] *= (1.0 - learningRate);
previousActionEstimations[previousAction] += (learningRate * (reward + discountFactor *
qvalues[nextState][nextAction]));
}
/// <summary>
/// Update Q-function's value for the previous state-action pair.
/// </summary>
///
/// <param name="previousState">Curren state.</param>
/// <param name="previousAction">Action, which lead from previous to the next state.</param>
/// <param name="reward">Reward value, received by taking specified action from previous state.</param>
///
/// <remarks>Updates Q-function's value for the previous state-action pair in
/// the case if the next state is terminal.</remarks>
///
public void UpdateState(int previousState, int previousAction, double reward)
{
// previous state's action estimations
double[] previousActionEstimations = qvalues[previousState];
// update expected summary reward of the previous state
previousActionEstimations[previousAction] *= (1.0 - learningRate);
previousActionEstimations[previousAction] += (learningRate * reward);
}
}
}
// Accord Machine Learning Library
// The Accord.NET Framework
// http://accord-framework.net
//
// AForge Machine Learning Library
// AForge.NET framework
//
// Copyright © Andrew Kirillov, 2007-2008
// andrew.kirillov@gmail.com
//
// Copyright © César Souza, 2009-2017
// cesarsouza at gmail.com
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
namespace QuantConnect.Algorithm.CSharp
{
using System;
/// <summary>
/// Exploration policy interface.
/// </summary>
///
/// <remarks>The interface describes exploration policies, which are used in Reinforcement
/// Learning to explore state space.</remarks>
///
public interface IExplorationPolicy
{
/// <summary>
/// Choose an action.
/// </summary>
///
/// <param name="actionEstimates">Action estimates.</param>
///
/// <returns>Returns selected action.</returns>
///
/// <remarks>The method chooses an action depending on the provided estimates. The
/// estimates can be any sort of estimate, which values usefulness of the action
/// (expected summary reward, discounted reward, etc).</remarks>
///
int ChooseAction(double[] actionEstimates);
}
/// <summary>
/// Epsilon greedy exploration policy.
/// </summary>
///
/// <remarks><para>The class implements epsilon greedy exploration policy. According to the policy,
/// the best action is chosen with probability <b>1-epsilon</b>. Otherwise,
/// with probability <b>epsilon</b>, any other action, except the best one, is
/// chosen randomly.</para>
///
/// <para>According to the policy, the epsilon value is known also as exploration rate.</para>
/// </remarks>
///
/// <seealso cref="RouletteWheelExploration"/>
/// <seealso cref="BoltzmannExploration"/>
/// <seealso cref="TabuSearchExploration"/>
///
public class EpsilonGreedyExploration : IExplorationPolicy
{
// exploration rate
private double epsilon;
// random number generator
private Random rand = Accord.Math.Random.Generator.Random;
/// <summary>
/// Epsilon value (exploration rate), [0, 1].
/// </summary>
///
/// <remarks><para>The value determines the amount of exploration driven by the policy.
/// If the value is high, then the policy drives more to exploration - choosing random
/// action, which excludes the best one. If the value is low, then the policy is more
/// greedy - choosing the beat so far action.
/// </para></remarks>
///
public double Epsilon
{
get { return epsilon; }
set
{
if (value < 0 || value > 1.0)
throw new ArgumentOutOfRangeException("Epsilon should be between 0 and 1.");
epsilon = value;
}
}
/// <summary>
/// Initializes a new instance of the <see cref="EpsilonGreedyExploration"/> class.
/// </summary>
///
/// <param name="epsilon">Epsilon value (exploration rate).</param>
///
public EpsilonGreedyExploration(double epsilon)
{
Epsilon = epsilon;
}
/// <summary>
/// Choose an action.
/// </summary>
///
/// <param name="actionEstimates">Action estimates.</param>
///
/// <returns>Returns selected action.</returns>
///
/// <remarks>The method chooses an action depending on the provided estimates. The
/// estimates can be any sort of estimate, which values usefulness of the action
/// (expected summary reward, discounted reward, etc).</remarks>
///
public int ChooseAction(double[] actionEstimates)
{
// actions count
int actionsCount = actionEstimates.Length;
// find the best action (greedy)
double maxReward = actionEstimates[0];
int greedyAction = 0;
for (int i = 1; i < actionsCount; i++)
{
if (actionEstimates[i] > maxReward)
{
maxReward = actionEstimates[i];
greedyAction = i;
}
}
// try to do exploration
if (rand.NextDouble() < epsilon)
{
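// draw uniformly from the remaining actions and shift indices at or above the greedy action,
// so the greedy action itself is excluded from the random choice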
int randomAction = rand.Next(actionsCount - 1);
if (randomAction >= greedyAction)
randomAction++;
return randomAction;
}
return greedyAction;
}
}
/// <summary>
/// Roulette wheel exploration policy.
/// </summary>
///
/// <remarks><para>The class implements roulette whell exploration policy. Acording to the policy,
/// action <b>a</b> at state <b>s</b> is selected with the next probability:</para>
/// <code lang="none">
///                   Q( s, a )
/// p( s, a ) = ------------------
///              SUM( Q( s, b ) )
///               b
/// </code>
/// <para>where <b>Q(s, a)</b> is action's <b>a</b> estimation (usefulness) at state <b>s</b>.</para>
///
/// <para><note>The exploration policy may be applied only in cases, when action estimates (usefulness)
/// are represented with positive value greater then 0.</note></para>
/// </remarks>
///
/// <seealso cref="BoltzmannExploration"/>
/// <seealso cref="EpsilonGreedyExploration"/>
/// <seealso cref="TabuSearchExploration"/>
///
public class RouletteWheelExploration : IExplorationPolicy
{
// random number generator
private Random rand = Accord.Math.Random.Generator.Random;
/// <summary>
/// Initializes a new instance of the <see cref="RouletteWheelExploration"/> class.
/// </summary>
///
public RouletteWheelExploration() { }
/// <summary>
/// Choose an action.
/// </summary>
///
/// <param name="actionEstimates">Action estimates.</param>
///
/// <returns>Returns selected action.</returns>
///
/// <remarks>The method chooses an action depending on the provided estimates. The
/// estimates can be any sort of estimate, which values usefulness of the action
/// (expected summary reward, discounted reward, etc).</remarks>
///
public int ChooseAction(double[] actionEstimates)
{
// actions count
int actionsCount = actionEstimates.Length;
// actions sum
double sum = 0, estimateSum = 0;
for (int i = 0; i < actionsCount; i++)
{
estimateSum += actionEstimates[i];
}
// get random number, which determines which action to choose
double actionRandomNumber = rand.NextDouble();
for (int i = 0; i < actionsCount; i++)
{
sum += actionEstimates[i] / estimateSum;
if (actionRandomNumber <= sum)
return i;
}
return actionsCount - 1;
}
}
}
using QuantConnect.Data;
using QuantConnect.Data.Custom;
using QuantConnect.Indicators;
using QuantConnect.Securities;
using System;
using System.Collections.Generic;
using QuantConnect.Data.Market;
using System.Linq;
using QuantConnect.Data.Consolidators;
using MathNet.Numerics.Statistics;
using Accord.MachineLearning;
using Accord.Statistics.Distributions.DensityKernels;
using QuantConnect.Securities.Equity;
#if PETTER
using System.Windows.Media;
using LiveCharts.Wpf;
#endif
namespace QuantConnect.Algorithm.CSharp
{
public partial class TrendOppositeEntryAlgorithm : QCAlgorithm
{
private enum Charting
{
Disabled,
FullResolution,
TradesOnly,
DailyEquityOnly,
ProfitPerDelay,
}
private const Charting _charting = Charting.DailyEquityOnly;
private const decimal _maxLeverage = 30;
private const decimal _leverage = 1;
private static TrendOppositeEntryAlgorithm _instance;
public override void Initialize()
{
SetCash(400000);
if (true||_charting == Charting.DailyEquityOnly)
{
SetStartDate(2014, 1, 15);
SetEndDate(2018, 7, 15);
}
else
{
SetStartDate(2014, 1, 25);
SetEndDate(2018, 7, 10);
}
try
{
ActualInitialization();
}
catch (Exception e)
{
ReportException(e, this);
}
}
public static void ReportException(Exception e, QCAlgorithm algo)
{
algo.Error(algo.Time + ": " + e.Message + "\n" + e.StackTrace);
algo.Log(algo.Time + ": " + e.Message + "\n" + e.StackTrace);
}
private Security _security;
#if PETTER
private SignalView _signalView;
#endif
private void ActualInitialization()
{
_instance = this;
SetBrokerageModel(Brokerages.BrokerageName.OandaBrokerage);
var res = Resolution.Daily;
_security = AddCfd("WHEATUSD", res, leverage: _maxLeverage);
Schedule.On(DateRules.EveryDay(), TimeRules.At(0, 0, 0), AtMidnight);
//TODO: keeping this disabled unless we figure out it's a good idea
//Schedule.On(DateRules.EveryDay(), TimeRules.Every(TimeSpan.FromSeconds(5)), CheckForSensitiveEvents);
SetBenchmark(_security.Symbol);
#if PETTER
if (_charting != Charting.Disabled)
_signalView = SignalView.ShowInNewThread(CreateSignalViewOptions());
#endif
_evalBarMaker.DataConsolidated += _evalBarMaker_DataConsolidated;
_barMaker.DataConsolidated += _barMaker_DataConsolidated;
InitKernels();
}
#if PETTER
private SignalView.SetupOptions CreateSignalViewOptions()
{
var options = new SignalView.SetupOptions();
options.AxisGenerator = CreateExtraAxes;
return options;
}
private IEnumerable<Axis> CreateExtraAxes()
{
yield return new Axis()
{
MinValue = -210,
MaxValue = 210,
Sections = new SectionsCollection
{
new AxisSection
{
Value = 0,
SectionWidth = 1,
Stroke = new SolidColorBrush(Color.FromRgb(248, 213, 72))
},
/*new AxisSection
{
Value = 100,
SectionWidth = 1,
Fill = new SolidColorBrush
{
Color = Color.FromRgb(0, 255, 0),
Opacity = .8
}
},
new AxisSection
{
Value = -100,
SectionWidth = 1,
Fill = new SolidColorBrush
{
Color = Color.FromRgb(255, 0, 0),
Opacity = .8
}
}*/
}
};
}
#endif
private void AtMidnight()
{
#if PETTER
if (_charting != Charting.Disabled && _charting != Charting.ProfitPerDelay)
{
_signalView.SetXLabel(Time.ToShortDateString());
}
#endif
#if PETTER
if (_charting == Charting.DailyEquityOnly)
{
_signalView.PlotEquity((double)Portfolio.TotalPortfolioValue);
}
#endif
_equityAtStartOfDay = Portfolio.TotalPortfolioValue;
foreach (var kernel in _kernels)
kernel.AtMidnight();
}
public override void OnEndOfAlgorithm()
{
#if PETTER
if (_signalView != null)
_signalView.JoinThread();
#endif
}
private void CheckForSensitiveEvents()
{
/*_sensitiveEvents.Update();
if (_sensitiveEvents.InSensitiveZone && _components[0].Exchange.ExchangeOpen && _components[0].HoldStock)
{
ClosePosition();
}*/
}
private decimal _equityAtStartOfDay;
public override void OnData(Slice slice)
{
foreach (var kv in slice.QuoteBars)
{
OnBar(kv.Value);
}
}
private class Kernel
{
public int DirBias = -1;
private readonly RegressionChannel _equityFilter = new RegressionChannel(10, 2);
private readonly VirtualEquity _equity;
private decimal _lastSignal;
public decimal Rank
{
get
{
return _equityFilter;
}
}
private decimal _entryPrice;
public Kernel(Security sec)
{
_equity = new VirtualEquity(sec);
_equity.TradeFeeFraction = 0;
}
public void SetPosition(decimal signal)
{
if (signal != _lastSignal && _lastSignal != 0)
{
decimal exitPrice = _lastSignal < 0 ? _equity.Security.AskPrice : _equity.Security.BidPrice;
decimal profit = _lastSignal * (exitPrice / _entryPrice - 1);
}
_lastSignal = signal;
_equity.SetPosition(signal * DirBias);
_entryPrice = _lastSignal > 0 ? _equity.Security.AskPrice : _equity.Security.BidPrice;
}
public decimal GetPosition()
{
return _lastSignal * DirBias;
}
private decimal _preEquity;
public void OnEvalBar(QuoteBar bar)
{
if (_preEquity == 0)
_preEquity = _equity.GetEquity();
_equityFilter.Update(bar.Time, _equity.GetEquity() / _preEquity - 1);
_preEquity = _equity.GetEquity();
}
private int _lastMonth = -1;
public void AtMidnight()
{
if (_lastMonth >= 0 && _lastMonth != _instance.Time.Month)
{
//Reoptimize();
}
_lastMonth = _instance.Time.Month;
}
private class SimPosition
{
public decimal Price;
public int Dir;
public bool Closed;
public bool Real = true;
public decimal MAE;
public decimal MFE;
public SimPosition(decimal price, int dir)
{
Price = price;
Dir = dir;
}
public decimal GetExcursion(decimal price)
{
return Dir * (price / Price - 1);
}
public void Update(decimal price, int bias)
{
if (Closed)
return;
decimal excursion = GetExcursion(price);
MFE = Math.Max(MFE, excursion);
MAE = Math.Min(MAE, excursion);
if (bias != Dir)
Closed = true;
}
}
private readonly RollingWindow<SimPosition> _simPos = new RollingWindow<SimPosition>(20);
private ExponentialMovingAverage _ema20 = new ExponentialMovingAverage(20);
private decimal _anchorPrice;
public void OnTriggerBar(QuoteBar bar, decimal slippage)
{
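// Simulated counter-trend entries: whenever price moves more than 0.4% from the anchor against the
// EMA(20) bias, open a simulated position in the bias direction. Each simulated position tracks its
// maximum favourable (MFE) and adverse (MAE) excursion; a position stops counting as "real" once its
// excursion exceeds the average MFE, or falls below the average MAE while the bias has turned against it.
// Once more than 10 simulated positions exist, the live signal is the net direction of the surviving
// "real" positions, flipped when the average MAE exceeds the average MFE.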
int switchdir = 1;
int bias = Math.Sign(bar.Price - _ema20);
_ema20.Update(bar.Time, bar.Price);
if (_simPos.Any())
{
foreach (var pos in _simPos)
pos.Update(bar.Price, bias);
decimal meanMAE = _simPos.Average(x => x.MAE);
decimal meanMFE = _simPos.Average(x => x.MFE);
if (meanMAE > meanMFE)
switchdir = -1;
foreach (var pos in _simPos)
{
if (!pos.Real)
continue;
decimal excursion = pos.GetExcursion(bar.Price);
if (excursion > meanMFE)
pos.Real = false;
else if (excursion < meanMAE && bias != pos.Dir)
pos.Real = false;
}
}
if (_anchorPrice == 0)
_anchorPrice = bar.Price;
var change = bar.Price / _anchorPrice - 1;
if (Math.Abs(change) > 0.4m / 100)
{
_anchorPrice = bar.Price;
if (bias < 0)
{
//consider short entry
if (change > 0)
{
_simPos.Add(new SimPosition(bar.Price, bias));
}
}
else if (bias > 0)
{
//consider long entry
if (change < 0)
{
_simPos.Add(new SimPosition(bar.Price, bias));
}
}
}
if (_simPos.Count > 10)
{
decimal signal = _simPos.Where(x => x.Real).Sum(x => x.Dir);
SetPosition(signal * switchdir);
}
else
{
SetPosition(0);
}
}
}
private void SelectKernel()
{
_activeKernel = _kernels.OrderByDescending(x => x.Rank).First();
}
private Kernel _activeKernel;
private List<Kernel> _kernels = new List<Kernel>();
private readonly SimpleMovingAverage _graphSMA = new SimpleMovingAverage(60); //just charting position
private readonly QuoteBarConsolidator _evalBarMaker = new QuoteBarConsolidator(TimeSpan.FromDays(1));
private readonly QuoteBarConsolidator _barMaker = new QuoteBarConsolidator(TimeSpan.FromHours(1));
private void InitKernels()
{
var periods = new int[] { 5 };
var entries = new decimal[] { 0.6m, 0.66m, 0.7m, 0.75m, 0.8m, 0.85m, 0.9m, 0.95m };
var exits = new decimal[] { 0.1m, 0.3m, 0.5m, 0.55m, 0.6m, 0.66m, 0.7m, 0.75m };
//for (int dir = -1; dir <= 1; dir += 2)
{
//foreach (var period in periods)
{
//foreach (var exit in exits)
{
//foreach (var entry in entries)
{
//if (entry < exit + 0.1m)
//continue;
_kernels.Add(new Kernel(_security)
{
//DirBias = dir,
//EntryFactor = entry,
//ExitFactor = exit,
});
}
}
}
}
SelectKernel();
}
private void _evalBarMaker_DataConsolidated(object sender, QuoteBar bar)
{
foreach (var kernel in _kernels)
kernel.OnEvalBar(bar);
SelectKernel();
}
private void Note(string msg)
{
#if PETTER
if (!LiveMode)
Debug(msg);
#endif
}
decimal _lastPos;
decimal _pos;
//for stats
decimal _entryPrice;
decimal _entrySpread;
private void _barMaker_DataConsolidated(object sender, QuoteBar bar)
{
}
private void OnTriggerBar(QuoteBar bar)
{
var slippage = _security.AskPrice - _security.BidPrice;
foreach (var kernel in _kernels)
kernel.OnTriggerBar(bar, slippage);
}
private void OnBar(QuoteBar bar)
{
_evalBarMaker.Update(bar);
_barMaker.Update(bar);
OnTriggerBar(bar);
if (_activeKernel == null)
_pos = 0;
else
{
_pos = _activeKernel.GetPosition();
}
if (_pos != _lastPos)
{
if (_pos == 0)
{
if (_security.HoldStock)
{
Note("Liquidate at " + _security.Price);
Liquidate(_security.Symbol);
// _pos is already 0 in this branch; use the previous position to pick the exit side
decimal exitPrice = _lastPos > 0 ? _security.BidPrice : _security.AskPrice;
decimal exitSpread = _security.AskPrice - _security.BidPrice;
Note("Trade profit: " + (exitPrice / _entryPrice - 1).ToString("P2"));
Note("Spread cost: " + (_entrySpread / _entryPrice + exitSpread / exitPrice).ToString("P2"));
}
}
else
{
if (true/*_activeKernel.WillSucceed*/)
{
if (_pos > 0)
Note("Long at " + _security.Price);
else
Note("Short at " + _security.Price);
SetHoldings(_security.Symbol, _pos * _leverage);
_entryPrice = _pos > 0 ? _security.AskPrice : _security.BidPrice;
_entrySpread = _security.AskPrice - _security.BidPrice;
}
}
_lastPos = _pos;
}
_graphSMA.Update(Time, bar.Close);
#if PETTER
if (_charting == Charting.FullResolution || (_charting == Charting.TradesOnly && _pos != 0))
{
const decimal OFFSET = 0.01m;
//_signalView.PlotBar(bar);
_signalView.PlotValue("close", (double)_security.Price);
_signalView.PlotValue("pos", (double)(_graphSMA + _pos * OFFSET));
_signalView.PlotEquity((double)Portfolio.TotalPortfolioValue);
}
#endif
}
}
}
/*
* QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
* Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using QuantConnect.Indicators;
using System;
namespace QuantConnect.Algorithm.CSharp
{
/// <summary>
/// This indicator computes the n-period population standard deviation.
/// </summary>
public class StandardDeviationOverflowSafe : Variance
{
/// <summary>
/// Initializes a new instance of the StandardDeviation class with the specified period.
///
/// Evaluates the standard deviation of samples in the lookback period.
/// On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset.
/// </summary>
/// <param name="period">The sample size of the standard deviation</param>
public StandardDeviationOverflowSafe(int period)
: this("STD" + period, period)
{
}
/// <summary>
/// Initializes a new instance of the StandardDeviation class with the specified name and period.
///
/// Evaluates the standard deviation of samples in the lookback period.
/// On a dataset of size N will use an N normalizer and would thus be biased if applied to a subset.
/// </summary>
/// <param name="name">The name of this indicator</param>
/// <param name="period">The sample size of the standard deviation</param>
public StandardDeviationOverflowSafe(string name, int period)
: base(name, period)
{
}
/// <summary>
/// Gets a flag indicating when this indicator is ready and fully initialized
/// </summary>
public override bool IsReady
{
get { return Samples >= Period; }
}
/// <summary>
/// Computes the next value of this indicator from the given state
/// </summary>
/// <param name="input">The input given to the indicator</param>
/// <param name="window">The window for the input history</param>
/// <returns>A new value for this indicator</returns>
protected override decimal ComputeNextValue(IReadOnlyWindow<IndicatorDataPoint> window, IndicatorDataPoint input)
{
double val = Math.Sqrt((double)base.ComputeNextValue(window, input));
if (val <= _max && val >= _min)
return (decimal)val;
// clamp values outside the decimal-safe range to avoid overflow when casting
return (decimal)(val > _max ? _max : _min);
}
private static readonly double _max = (double)decimal.MaxValue * 0.01;
private static readonly double _min = (double)decimal.MinValue * 0.01;
}
}
using QuantConnect.Securities;
using System;
namespace QuantConnect.Algorithm.CSharp
{
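/// <summary>
/// Tracks a hypothetical equity curve for a position in a single security without placing real orders.
/// <see cref="SetPosition"/> compounds the open position's return, net of the configured slippage and
/// trade-fee fractions, into the equity base before switching to the new position weight.
/// </summary>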
public sealed class VirtualEquity
{
public interface ISecurity
{
decimal GetPrice();
Security GetSecurityIfSupportedOtherwiseThrow();
}
private class SecurityAdapter : ISecurity
{
private readonly Security _sec;
public SecurityAdapter(Security sec)
{
_sec = sec;
}
public decimal GetPrice()
{
return _sec.Price;
}
public Security GetSecurityIfSupportedOtherwiseThrow()
{
return _sec;
}
}
public decimal TradeFeeFraction { get; set; }
private readonly ISecurity _security;
private decimal _entryFee;
private decimal _entryPrice;
private decimal _position;
private decimal _equityBase = 1;
public VirtualEquity(ISecurity sec)
{
_security = sec;
TradeFeeFraction = 0.005m;
}
public VirtualEquity(Security sec) : this(new SecurityAdapter(sec))
{
}
public decimal Slippage
{
get; set;
}
public decimal Position
{
get { return _position; }
set { SetPosition(value); }
}
public Security Security { get { return _security.GetSecurityIfSupportedOtherwiseThrow(); } }
public decimal GetEquity()
{
if (_position == 0)
return _equityBase;
return Math.Max(0, _equityBase * (1 + _position * (_security.GetPrice() / _entryPrice - 1) - Slippage - _entryFee - GetTradeFee()));
}
public decimal GetReturn()
{
return GetEquity() - 1;
}
public decimal Equity
{
get
{
return GetEquity();
}
}
private decimal GetTradeFee()
{
if (_security.GetPrice() == 0)
return TradeFeeFraction;
return Math.Min(TradeFeeFraction, TradeFeeFraction / _security.GetPrice());
}
public void SetPosition(decimal weight)
{
_equityBase = GetEquity();
_position = weight;
_entryPrice = _security.GetPrice();
_entryFee = GetTradeFee();
}
public void ResetEquity(bool keepPosition = true)
{
var oldPos = _position;
SetPosition(0);
_equityBase = 1;
if (oldPos != 0 && keepPosition)
{
_equityBase += Slippage;
SetPosition(oldPos);
}
}
}
}
using QuantConnect.Indicators;
using System;
using System.Collections.Generic;
namespace QuantConnect.Algorithm.CSharp
{
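/// <summary>
/// Counts up/down outcomes per encoded n-gram context and reports a Laplace-smoothed
/// (add-one) probability that the next outcome is "up" for a given context code.
/// </summary>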
public sealed class BinaryNgramTable
{
private readonly int[] _upCount, _downCount;
public BinaryNgramTable(int N)
{
_upCount = new int[1 << N];
_downCount = new int[1 << N];
}
public static int EncodeSequence(IEnumerable<int> seq, int numSymbols)
{
int code = 0;
foreach (int symbol in seq)
{
code *= numSymbols;
code += symbol;
}
return code;
}
public void Feedback(int code, bool up)
{
if (up)
_upCount[code] += 1;
else
_downCount[code] += 1;
}
public double GetUpProbability(int code)
{
return (double)(_upCount[code] + 1) / (_upCount[code] + _downCount[code] + 2);
}
public int GetCount(int code)
{
return _upCount[code] + _downCount[code];
}
}
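/// <summary>
/// Keeps a rolling window of recent symbols and one <see cref="BinaryNgramTable"/> per context length.
/// <see cref="GetUpProbability"/> backs off to the longest context whose observation count reaches the
/// configured minimum, and <see cref="Feedback"/> records the realised outcome for every context length.
/// </summary>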
public sealed class BinaryNgramSequence
{
private readonly int _numSymbols;
private readonly int _minCount;
private RollingWindow<int> _sequence;
private readonly BinaryNgramTable[] _tables;
public BinaryNgramSequence(int maxN, int numSymbols, int minCount)
{
_numSymbols = numSymbols;
_minCount = minCount;
_sequence = new RollingWindow<int>(maxN);
_tables = new BinaryNgramTable[maxN + 1];
for (int n = 0; n <= maxN; ++n)
{
_tables[n] = new BinaryNgramTable(n);
}
}
public void ClearTables()
{
for (int n = 0; n <= _tables.Length - 1; ++n)
{
_tables[n] = new BinaryNgramTable(n);
}
}
public bool IsReady
{
get { return _sequence.IsReady; }
}
public IEnumerable<int> GetSequence()
{
CheckReady();
return _sequence;
}
private void CheckReady()
{
if (!_sequence.IsReady)
throw new Exception("Sequence not filled, check IsReady first");
}
public void Advance(int symbol)
{
_sequence.Add(symbol);
}
public double GetUpProbability()
{
CheckReady();
double best = _tables[0].GetUpProbability(0);
int code = 0;
int n = 1;
foreach (int symbol in _sequence)
{
code *= _numSymbols;
code += symbol;
var table = _tables[n++];
if (table.GetCount(code) < _minCount)
break;
best = table.GetUpProbability(code);
}
return best;
}
public void Feedback(bool up)
{
CheckReady();
_tables[0].Feedback(0, up);
int code = 0;
int n = 1;
foreach (int symbol in _sequence)
{
code *= _numSymbols;
code += symbol;
_tables[n++].Feedback(code, up);
}
}
}
}