Overall Statistics — Total Trades: 252 | Average Win: 0.89% | Average Loss: -0.28% | Compounding Annual Return: 67.304% | Drawdown: 18.900% | Expectancy: 2.098 | Net Profit: 126.121% | Sharpe Ratio: 2.317 | Probabilistic Sharpe Ratio: 90.821% | Loss Rate: 27% | Win Rate: 73% | Profit-Loss Ratio: 3.22 | Alpha: 0.484 | Beta: -0.165 | Annual Standard Deviation: 0.197 | Annual Variance: 0.039 | Information Ratio: 0.873 | Tracking Error: 0.329 | Treynor Ratio: -2.765 | Total Fees: \$252.00
```class WordScoreChunk2:
```class WordScoreChunk3:
```class WordScoreChunk4:
```import numpy as np
import pandas as pd
from collections import deque
import random
from gym import spaces
import math
from scipy.stats import linregress
import random

def __init__(self, obs_len=10, df=None):
    """Set up the trading environment.

    Args:
        obs_len (int): length of the rolling observation window.
        df (pd.DataFrame): MultiIndex (symbol, feature) data; when None a
            zero-filled placeholder frame from dummy_data() is used.
    """
    self.window = obs_len
    self.data = self.dummy_data() if df is None else df

    # unique top-level column values are the tradable symbols (order preserved)
    symbols = list(dict.fromkeys(self.data.columns.get_level_values(0)))

    self.SymbolList = symbols
    self.CountIter = -1          # advanced by reset() to cycle through symbols
    self.MaxCount = len(symbols)

    # single continuous action in [-1, 1]
    self.action_space = spaces.Box(-1, +1, (1,), dtype=np.float32)

def dummy_data(self):
    """Build a zero-filled placeholder frame: 3 symbols x (close, volume, score).

    Returns:
        pd.DataFrame: 100 rows, columns as an ordered MultiIndex of
        (symbol, feature), sorted symbol-first then close/volume/score.
    """
    zeros = np.zeros(100)
    symbols = ['symbol_1', 'symbol_2', 'symbol_3']
    features = ['close', 'volume', 'score']

    # one sub-frame per feature, each with a column per symbol
    frames = {feat: pd.DataFrame({sym: zeros for sym in symbols}) for feat in features}
    df = pd.concat(frames, axis=1)

    # re-label columns as (symbol, feature) with a fixed categorical ordering
    feat_level = pd.Categorical(df.columns.get_level_values(0),
                                categories=features,
                                ordered=True)
    sym_level = pd.Categorical(df.columns.get_level_values(1),
                               categories=symbols,
                               ordered=True)
    df.columns = pd.MultiIndex.from_arrays([sym_level, feat_level])
    return df.sort_index(axis=1, level=[0, 1])

def reset(self, randomIndex=False):
    """Reset the episode: select the next (or a random) symbol and return the first observation.

    Args:
        randomIndex (bool): when True pick a symbol uniformly at random
            instead of cycling through the symbol list.

    Returns:
        np.ndarray: first observation vector for the selected symbol.
    """
    if randomIndex:
        # BUG FIX: random.randint is inclusive on both ends, so the old
        # randint(0, MaxCount) could produce MaxCount and the wrap logic
        # then skewed the pick toward index 0. randrange gives a uniform
        # pick over [0, MaxCount); the -1 offset makes the shared
        # increment below land exactly on the chosen index.
        self.CountIter = random.randrange(self.MaxCount) - 1

    # wrap around once the end of the symbol list is reached
    if self.CountIter + 1 >= self.MaxCount:
        self.CountIter = -1

    self.CountIter += 1

    # symbol whose data drives this episode
    self.sym = self.SymbolList[self.CountIter]

    df = self.data[self.sym]

    self.close = df['close'].values
    self.volume = df['volume'].values
    self.news = df['score'].values
    self.returns = df['close'].pct_change().values

    # start far enough in that the first rolling window is full of data
    self.ts_index = self.window + 1

    # build the first observation
    c_window, v_window, n_window = self.on_data()
    observations = self.next_observation(close_window=c_window, volume_window=v_window, news_window=n_window)

    # observation space is sized from the actual feature vector
    self.observation_space = spaces.Box(-np.inf, np.inf, shape=(len(observations),), dtype=np.float32)

    # per-episode strategy returns, used to evaluate performance
    self.strat_returns = []

    return observations

def std(self, x):
    """Return the z-score of the last element of *x* relative to the whole window."""
    centered = x - x.mean()
    zscores = centered / x.std()
    return zscores[-1]

def exponential_regression(self, data):
    """Fit log(data) against time; return (1 + slope) weighted by fit quality r^2."""
    log_prices = np.log(data)
    t = np.arange(log_prices.size)
    fit = linregress(t, log_prices)
    return (1 + fit.slope) * (fit.rvalue ** 2)

def regression(self, data):
    """Fit data against time; return (1 + slope) weighted by fit quality r^2."""
    t = np.arange(len(data))
    fit = linregress(t, data)
    return (1 + fit.slope) * (fit.rvalue ** 2)

def next_observation(self, close_window, volume_window, news_window):
    """Assemble the feature vector for one timestep.

    Features, in order: scaled mean news score, z-score of last close,
    linear-regression trend score, z-score of last volume, z-score of
    last news value. NaNs are zeroed so the agent never sees invalid numbers.
    """
    # average sentiment over the window, scaled down (feature 1)
    sentiment = news_window.mean() / 100

    # how well the slope is correlated with itself
    trend = self.regression(close_window)

    # last standardized values of close, volume and sentiment
    close_z = self.std(close_window)
    volume_z = self.std(volume_window)
    news_z = self.std(news_window)

    # combine multiple features into one vector
    obs = np.array([sentiment, close_z, trend, volume_z, news_z])

    # replace NaNs (e.g. zero-variance windows) with zero
    obs[np.isnan(obs)] = 0

    return obs

def on_data(self):
    """Return the current rolling windows of close, volume and news data.

    Returns:
        tuple: (close_window, volume_window, news_window), each covering
        the ``self.window`` entries ending just before ``self.ts_index``.
    """
    # where are we in the index?
    step = self.ts_index

    close_window = self.close[step - self.window:step]
    volume_window = self.volume[step - self.window:step]
    # BUG FIX: the news window was sliced from self.volume, so every
    # sentiment feature was silently computed from volume data.
    news_window = self.news[step - self.window:step]

    return close_window, volume_window, news_window

# where are we in index?
step = self.ts_index

# calculate reward

self.strat_returns.append(reward)

return reward if np.isfinite(reward) else 0

def normalize(self, x):
    '''
    Scale an absolute action value into a bet size:
    greater than 0.15 == buy scaled 0-1
    in between +0.15 & -0.15 == do nothing
    less than -0.15 == sell scaled 0-1
    '''
    scaled = (1 / 0.95 * x) - 0.05264
    return np.round(scaled, 3)

def step(self, action):
    '''Step the environment forward one bar.

    Args:
        action: length-1 array-like policy output in [-1, 1].

    Returns:
        tuple: (observations, reward, done, ts_index).

    NOTE(review): `reward` is read below but never assigned anywhere in
    this chunk -- the reward computation that the "# get reward" markers
    point at appears to have been lost; as written this raises NameError
    at runtime. Confirm against the original file before shipping.
    '''

    done = False

    # get action from neural network
    action = float(action[0])

    # buy / sell / do-nothing logic
    if action >= 0.05:
        # bet size for a buy, clipped into [0, 1]
        size = np.clip(self.normalize(abs(action)), 0, 1)

    elif action <= -0.05:
        # bet size sell
        size = -(np.clip(self.normalize(abs(action)), 0, 1))

    else:
        # do nothing
        size = 0
    # NOTE(review): `size` is not consumed in the visible code -- presumably
    # it feeds the missing reward calculation; verify.

    # if done, break and return final values
    if self.ts_index + 2 >= len(self.close):

        done = True

        # get reward

        # step through next iteration of data
        c_window, v_window, n_window = self.on_data()

        # gets obs
        observations = self.next_observation(close_window=c_window, volume_window=v_window, news_window=n_window)

        return observations, reward, done, self.ts_index

    # get reward

    # add one to timestep index (different from current index)
    self.ts_index += 1

    # step through next iteration of data
    c_window, v_window, n_window = self.on_data()

    # get features
    observations = self.next_observation(close_window=c_window, volume_window=v_window, news_window=n_window)

    return observations, reward, done, self.ts_index
```import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from network import Actor, Critic
import base64
from io import BytesIO
import json

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class TD3(object):
    """Agent class that handles the training of the networks and provides outputs as actions

    Args:
        algo: owning algorithm object (kept for persistence/logging hooks)
        state_dim (int): state size
        action_dim (int): action size
        max_action (float): highest action to take
        seed (int): random seed forwarded to the actor/critic networks

    """
    def __init__(self, algo, state_dim, action_dim, max_action, seed):
        self.algo = algo
        self.actor = Actor(state_dim, action_dim, max_action, seed).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action, seed).to(device)

        self.critic = Critic(state_dim, action_dim, seed).to(device)
        self.critic_target = Critic(state_dim, action_dim, seed).to(device)

        # BUG FIX: train() steps these optimizers but they were never
        # created, so the first training call raised AttributeError.
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters())
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters())

        self.max_action = max_action

        #np.random.seed(seed=seed)

    def select_action(self, state, noise=0.1):
        """Select an appropriate action from the agent policy

        Args:
            state (array): current state of environment
            noise (float): how much noise to add to actions

        Returns:
            action (float): action clipped within action range

        """

        state = torch.FloatTensor(state.reshape(1, -1)).to(device)

        action = self.actor(state).cpu().data.numpy().flatten()
        if noise != 0:
            # exploration noise on top of the deterministic policy output
            action = (action + np.random.normal(0, noise, size=1))

        return action.clip(-self.max_action, self.max_action)

    def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
        """Train and update actor and critic networks

        Args:
            replay_buffer (ReplayBuffer): buffer for experience replay
            iterations (int): how many times to run training
            batch_size(int): batch size to sample from replay buffer
            discount (float): discount factor
            tau (float): soft update for main networks to target networks
            policy_noise (float): std of target-policy smoothing noise
            noise_clip (float): clamp range for that noise
            policy_freq (int): delayed actor/target update period

        """

        for it in range(iterations):

            # Sample replay buffer (positions: state, next_state, action, reward, done)
            x, y, u, r, d = replay_buffer.sample(batch_size)
            state = torch.FloatTensor(x).to(device)
            action = torch.FloatTensor(u).to(device)
            next_state = torch.FloatTensor(y).to(device)
            done = torch.FloatTensor(1 - d).to(device)
            reward = torch.FloatTensor(r).to(device)

            # Select action according to policy and add clipped noise (target policy smoothing)
            noise = torch.FloatTensor(u).data.normal_(0, policy_noise).to(device)
            noise = noise.clamp(-noise_clip, noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Compute the target Q value; min of the twin critics limits overestimation
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = reward + (done * discount * target_Q).detach()

            # Get current Q estimates
            current_Q1, current_Q2 = self.critic(state, action)

            # Compute critic loss
            critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)

            # Optimize the critic
            # BUG FIX: gradients must be cleared each step or they accumulate
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()

            # Delayed policy and target updates
            if it % policy_freq == 0:

                # Compute actor loss
                actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

                # Optimize the actor
                # BUG FIX: clear stale gradients before backprop
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                self.actor_optimizer.step()

                # Update the frozen target models (Polyak averaging)
                for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                    target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

                for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                    target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

    def save(self, filename):
        """Persist actor and critic weights to '<filename>_actor.pth' / '<filename>_critic.pth'."""
        torch.save(self.actor.state_dict(), '%s_actor.pth' % (filename))
        torch.save(self.critic.state_dict(), '%s_critic.pth' % (filename))

```import numpy as np
import pickle

# Expects tuples of (state, next_state, action, reward, done)
class ReplayBuffer(object):
    """Buffer to store tuples of experience replay.

    Tuples are stored as (state, next_state, action, reward, done).
    """

    def __init__(self, algo, max_size=1000000):
        """
        Args:
            algo: owning algorithm object, used for ObjectStore persistence/logging
            max_size (int): total amount of tuples to store
        """
        self.algo = algo

        self.storage = []
        self.max_size = max_size
        self.ptr = 0  # overwrite position once the buffer is full

    def add(self, data):
        """Add an experience tuple, overwriting the oldest entry when full.

        Args:
            data (tuple): experience replay tuple

        NOTE(review): the ``def add`` line was missing in the retrieved
        source (only an orphaned docstring and body remained); the
        signature is reconstructed from the body's use of ``data``.
        """
        if len(self.storage) == self.max_size:
            # ring-buffer overwrite of the oldest entry
            self.storage[int(self.ptr)] = data
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(data)

    def save(self, name='ReplayBuff'):
        '''Save replay buffer for live trading'''
        self.algo.ObjectStore.Save(name, str(self.storage))
        self.algo.Log("{} - Saving Replay Buffer!: {}".format(self.algo.Time, len(self.storage)))

    def sample(self, batch_size):
        """Samples a random amount of experiences from buffer of batch size.

        Args:
            batch_size (int): size of sample

        Returns:
            tuple of np.ndarray: arrays in storage-tuple order
            (states, next_states, actions, rewards, dones); rewards and
            dones are reshaped to (batch, 1).
        """

        ind = np.random.randint(0, len(self.storage), size=batch_size)
        # NOTE: local names follow the documented tuple order
        # (state, next_state, action, reward, done); the original code
        # mislabelled positions 2/3 but positional behavior is unchanged.
        states, next_states, actions, rewards, dones = [], [], [], [], []

        for i in ind:
            s, s_, a, r, d = self.storage[i]
            states.append(np.array(s, copy=False))
            next_states.append(np.array(s_, copy=False))
            actions.append(np.array(a, copy=False))
            rewards.append(np.array(r, copy=False))
            dones.append(np.array(d, copy=False))

        return np.array(states), np.array(next_states), np.array(actions), np.array(rewards).reshape(-1, 1), np.array(dones).reshape(-1, 1)
```import torch
import torch.nn as nn
import torch.nn.functional as F

def hidden_init(layer):
    """Return the symmetric uniform init range (-1/sqrt(fan), +1/sqrt(fan)) for *layer*.

    BUG FIX: the original called np.sqrt but numpy is never imported in
    this file, so any call raised NameError; a pure-Python square root is
    used instead (numerically identical).

    NOTE(review): size()[0] of a Linear weight is out_features, not the
    true fan-in (size()[1]) -- kept as-is to preserve existing behavior;
    confirm the intent.
    """
    fan_in = layer.weight.data.size()[0]
    lim = 1. / (fan_in ** 0.5)
    return (-lim, lim)

class Actor(nn.Module):
"""Initialize parameters and build model.
Args:
state_size (int): Dimension of each state
action_size (int): Dimension of each action
max_action (float): highest action to take
seed (int): Random seed
h1_units (int): Number of nodes in first hidden layer
h2_units (int): Number of nodes in second hidden layer

Return:
action output of network with tanh activation
"""

def __init__(self, state_dim, action_dim, max_action, seed):
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)

self.l1 = nn.Linear(state_dim, 400)
self.l2 = nn.Linear(400, 300)
self.l3 = nn.Linear(300, action_dim)

self.max_action = max_action

def forward(self, x):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.max_action * torch.tanh(self.l3(x))
return x

class Critic(nn.Module):
    """Twin Q-networks: map a (state, action) pair to two scalar value estimates.

    Args:
        state_dim (int): Dimension of each state
        action_dim (int): Dimension of each action
        seed (int): Random seed

    Return:
        value output of network (two estimates from forward, one from Q1)
    """

    def __init__(self, state_dim, action_dim, seed):
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)

        # Q1 architecture
        self.l1 = nn.Linear(state_dim + action_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, 1)

        # Q2 architecture
        self.l4 = nn.Linear(state_dim + action_dim, 400)
        self.l5 = nn.Linear(400, 300)
        self.l6 = nn.Linear(300, 1)

    def forward(self, x, u):
        # both heads see the concatenated (state, action) pair
        pair = torch.cat([x, u], 1)

        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(pair)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(pair)))))

        return q1, q2

    def Q1(self, x, u):
        """Q1 head only -- used for the actor (policy) gradient."""
        pair = torch.cat([x, u], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(pair)))))
```import nltk
from nltk.tokenize import word_tokenize
from itertools import chain
import re

# we cant have one big file full of keys and values so I split it up
from SedimentDict_1 import WordScoreChunk1 as wsc1
from SedimentDict_2 import WordScoreChunk2 as wsc2
from SedimentDict_3 import WordScoreChunk3 as wsc3
from SedimentDict_4 import WordScoreChunk4 as wsc4

class WordScore:
    """Word-level sentiment scorer backed by four dictionary chunk files."""

    def __init__(self):
        # NOTE(review): chunk 1 exposes `DWords` while chunks 2-4 are read
        # as `Dwords` -- confirm the attribute name in each SedimentDict file.
        ws1 = wsc1.DWords
        ws2 = wsc2.Dwords
        ws3 = wsc3.Dwords
        ws4 = wsc4.Dwords

        # combine all python dictionaries into a single lookup table
        self.wordScores = dict(chain.from_iterable(d.items() for d in (ws1, ws2, ws3, ws4)))

    def score(self, article):
        '''Get sediment score: sum of dictionary scores of known tokens.

        BUG FIX: the original re-assigned `words` to the *string* produced
        by re.sub on str(words), so the final loop iterated over single
        characters and could only ever match one-character keys. Tokens
        are now scored directly, which also lets emoticon keys such as
        ":)" match.
        '''
        try:
            words = word_tokenize(article)
        except Exception:
            # unparseable / non-string input scores neutral
            return 0
        return sum(self.wordScores[word] for word in words if word in self.wordScores)
```class WordScoreChunk1:
DWords = {"\$:": -1.5, "%)": -0.4, "%-)": -1.5, "&-:": -0.4, "&:": -0.7, "( \'}{\' )": 1.6, "(%": -0.9, "(\'-:": 2.2, "(\':": 2.3, "((-:": 2.1, "(*": 1.1, "(-%": -0.7, "(-*": 1.3, "(-:": 1.6, "(-:0": 2.8, "(-:<": -0.4, "(-:o": 1.5,
"(-:O": 1.5, "(-:{": -0.1, "(-:|>*": 1.9, "(-;": 1.3, "(-;|": 2.1, "(8": 2.6, "(:": 2.2, "(:0": 2.4, "(:<": -0.2, "(:o": 2.5, "(:O": 2.5, "(;": 1.1, "(;<": 0.3, "(=": 2.2, "(?:": 2.1, "(^:": 1.5, "(^;": 1.5, "(^;0": 2.0, "(^;o": 1.9, "(o:": 1.6,
")\':": -2.0, ")-\':": -2.1, ")-:": -2.1, ")-:<": -2.2, ")-:{": -2.1, "):": -1.8, "):<": -1.9, "):{": -2.3, ");<": -2.6, "*)": 0.6, "*-)": 0.3, "*-:": 2.1, "*-;": 2.4, "*:": 1.9, "*<|:-)": 1.6, "*\\\\0/*": 2.3, "*^:": 1.6, ",-:": 1.2, "---\'-;-{@": 2.3,
"--<--<@": 2.2, ".-:": -1.2, "..###-:": -1.7, "..###:": -1.9, "/-:": -1.3, "/:": -1.3, "/:<": -1.4, "/=": -0.9, "/^:": -1.0, "/o:": -1.4, "0-8": 0.1, "0-|": -1.2, "0:)": 1.9, "0:-)": 1.4, "0:-3": 1.5, "0:03": 1.9, "0;^)": 1.6, "0_o": -0.3, "10q": 2.1, "1337": 2.1,
"143": 3.2, "1432": 2.6, "14aa41": 2.4, "182": -2.9, "187": -3.1, "2g2b4g": 2.8, "2g2bt": -0.1, "2qt": 2.1, "3:(": -2.2, "3:)": 0.5, "3:-(": -2.3, "3:-)": -1.4, "4col": -2.2, "4q": -3.1, "5fs": 1.5, "8)": 1.9, "8-d": 1.7, "8-o": -0.3, "86": -1.6, "8d": 2.9,
":###..": -2.4, ":\$": -0.2, ":&": -0.6, ":\'(": -2.2, ":\')": 2.3, ":\'-(": -2.4, ":\'-)": 2.7, ":(": -1.9, ":)": 2.0, ":*": 2.5, ":-###..": -2.5, ":-&": -0.5, ":-(": -1.5, ":-)": 1.3, ":-))": 2.8, ":-*": 1.7, ":-,": 1.1, ":-.": -0.9, ":-/": -1.2, ":-<": -1.5,
":-d": 2.3, ":-D": 2.3, ":-o": 0.1, ":-p": 1.5, ":-[": -1.6, ":-\\\\": -0.9, ":-c": -1.3, ":-|": -0.7, ":-||": -2.5, ":-\\u00de": 0.9, ":/": -1.4, ":3": 2.3, ":<": -2.1, ":>": 2.1, ":?)": 1.3, ":?c": -1.6, ":@": -2.5, ":d": 2.3, ":D": 2.3, ":l": -1.7, ":o": -0.4,
":p": 1.4, ":s": -1.2, ":[": -2.0, ":\\\\": -1.3, ":]": 2.2, ":^)": 2.1, ":^*": 2.6, ":^/": -1.2, ":^\\\\": -1.0, ":^|": -1.0, ":c": -2.1, ":c)": 2.0, ":o)": 2.1, ":o/": -1.4, ":o\\\\": -1.1, ":o|": -0.6, ":{": -1.9, ":|": -0.4, ":}": 2.1, ":\\u00de": 1.1, ";)": 0.9,
";-)": 1.0, ";-*": 2.2, ";-]": 0.7, ";d": 0.8, ";D": 0.8, ";]": 0.6, ";^)": 1.4, "</3": -3.0, "<3": 1.9, "<:": 2.1, "<:-|": -1.4, "=)": 2.2, "-3": 2.0, "#NAME?": 1.3, "=/": -1.4, "3": 2.1, "=]": 1.6, "=|": -0.8, ">-:": -2.0, ">.<": -1.3, ">:": -2.1, ">:(": -2.7,
">:)": 0.4, ">:-(": -2.7, ">:-)": -0.4, ">:/": -1.6, ">:o": -1.2, ">:p": 1.0, ">:[": -2.1, ">:\\\\": -1.7, ">;(": -2.9, ">;)": 0.1, ">_>^": 2.1, "@:": -2.1, "@>-->--": 2.1, "@}-;-\'---": 2.2, "aas": 2.5, "aayf": 2.7, "afu": -2.9, "alol": 2.8, "ambw": 2.9,
"aml": 3.4, "atab": -1.9, "awol": -1.3, "ayc": 0.2, "ayor": -1.2, "Aug-00": 0.3, "bfd": -2.7, "bfe": -2.6, "bff": 2.9, "bffn": 1.0, "bl": 2.3, "bsod": -2.2, "btd": -2.1, "btdt": -0.1, "bz": 0.4, "b^d": 2.6, "cwot": -2.3, "d-\':": -2.5, "d8": -3.2, "d:": 1.2,
"d:<": -3.2, "d;": -2.9, "d=": 1.5, "doa": -2.3, "dx": -3.0, "ez": 1.5, "fav": 2.0, "fcol": -1.8, "ff": 1.8, "ffs": -2.8, "fkm": -2.4, "foaf": 1.8, "ftw": 2.0, "fu": -3.7, "fubar": -3.0, "fwb": 2.5, "fyi": 0.8, "fysa": 0.4, "g1": 1.4, "gg": 1.2, "gga": 1.7,
"gigo": -0.6, "gj": 2.0, "gl": 1.3, "gla": 2.5, "gn": 1.2, "gr8": 2.7, "grrr": -0.4, "gt": 1.1, "h&k": 2.3, "hagd": 2.2, "hagn": 2.2, "hago": 1.2, "hak": 1.9, "hand": 2.2, "hho1/2k": 1.4, "hhoj": 2.0, "hhok": 0.9, "hugz": 2.0, "hi5": 1.9, "idk": -0.4, "ijs": 0.7,
"ilu": 3.4, "iluaaf": 2.7, "ily": 3.4, "ily2": 2.6, "iou": 0.7, "iyq": 2.3, "j/j": 2.0, "j/k": 1.6, "j/p": 1.4, "j/t": -0.2, "j/w": 1.0, "j4f": 1.4, "j4g": 1.7, "jho": 0.8, "jhomf": 1.0, "jj": 1.0, "jk": 0.9, "jp": 0.8, "jt": 0.9, "jw": 1.6, "jealz": -1.2,
"k4y": 2.3, "kfy": 2.3, "kia": -3.2, "kk": 1.5, "kmuf": 2.2, "l": 2.0, "l&r": 2.2, "laoj": 1.3, "lmao": 2.9, "lmbao": 1.8, "lmfao": 2.5, "lmso": 2.7, "lol": 1.8, "lolz": 2.7, "lts": 1.6, "ly": 2.6, "ly4e": 2.7, "lya": 3.3, "lyb": 3.0, "lyl": 3.1, "lylab": 2.7,
"lylas": 2.6, "lylb": 1.6, "m8": 1.4, "mia": -1.2, "mml": 2.0, "mofo": -2.4, "muah": 2.3, "mubar": -1.0, "musm": 0.9, "mwah": 2.5, "n1": 1.9, "nbd": 1.3, "nbif": -0.5, "nfc": -2.7, "nfw": -2.4, "nh": 2.2, "nimby": -0.8, "nimjd": -0.7, "nimq": -0.2, "nimy": -1.4,
"nitl": -1.5, "nme": -2.1, "noyb": -0.7, "np": 1.4, "ntmu": 1.4, "o-8": -0.5, "o-:": -0.3, "o-|": -1.1, "o.o": -0.8, "O.o": -0.6, "o.O": -0.6, "o:": -0.2, "o:)": 1.5, "o:-)": 2.0, "o:-3": 2.2, "o:3": 2.3, "o:<": -0.3, "o;^)": 1.6, "ok": 1.2, "o_o": -0.5, "O_o": -0.5,
"o_O": -0.5, "pita": -2.4, "pls": 0.3, "plz": 0.3, "pmbi": 0.8, "pmfji": 0.3, "pmji": 0.7, "po": -2.6, "ptl": 2.6, "pu": -1.1, "qq": -2.2, "qt": 1.8, "r&r": 2.4, "rofl": 2.7, "roflmao": 2.5, "rotfl": 2.6, "rotflmao": 2.8, "rotflmfao": 2.5, "rotflol": 3.0, "rotgl": 2.9,
"rotglmao": 1.8, "s:": -1.1, "sapfu": -1.1, "sete": 2.8, "sfete": 2.7, "sgtm": 2.4, "slap": 0.6, "slaw": 2.1, "smh": -1.3, "snafu": -2.5, "sob": -1.0, "swak": 2.3, "tgif": 2.3, "thks": 1.4, "thx": 1.5, "tia": 2.3, "tmi": -0.3, "tnx": 1.1, "TRUE": 1.8, "tx": 1.5, "txs": 1.1,
"ty": 1.6, "tyvm": 2.5, "urw": 1.9, "vbg": 2.1, "vbs": 3.1, "vip": 2.3, "vwd": 2.6, "vwp": 2.1, "wag": -0.2, "wd": 2.7, "wilco": 0.9, "wp": 1.0, "wtf": -2.8, "wtg": 2.1, "wth": -2.4, "x-d": 2.6, "x-p": 1.7, "xd": 2.8, "xlnt": 3.0, "xoxo": 3.0, "xoxozzz": 2.3,
"xp": 1.6, "xqzt": 1.6, "xtc": 0.8, "yolo": 1.1, "yoyo": 0.4, "yvw": 1.6, "yw": 1.8, "ywia": 2.5, "zzz": -1.2, "[-;": 0.5, "[:": 1.3, "[;": 1.0, "[=": 1.7, "\\\\-:": -1.0, "\\\\:": -1.0, "\\\\:<": -1.7, "\\\\=": -1.1, "\\\\^:": -1.3, "\\\\o/": 2.2, "\\\\o:": -1.2,
"]-:": -2.1, "]:": -1.6, "]:<": -2.5, "^<_<": 1.4, "^urs": -2.8, "abandon": -1.9, "abandoned": -2.0, "abandoner": -1.9, "abandoners": -1.9, "abandoning": -1.6, "abandonment": -2.4, "abandonments": -1.7, "abandons": -1.3, "abducted": -2.3, "abduction": -2.8,
"abductions": -2.0, "abhor": -2.0, "abhorred": -2.4, "abhorrent": -3.1, "abhors": -2.9, "abilities": 1.0, "ability": 1.3, "aboard": 0.1, "absentee": -1.1, "absentees": -0.8, "absolve": 1.2, "absolved": 1.5, "absolves": 1.3, "absolving": 1.6, "abuse": -3.2,
"abused": -2.3, "abuser": -2.6, "abusers": -2.6, "abuses": -2.6, "abusing": -2.0, "abusive": -3.2, "abusively": -2.8, "abusiveness": -2.5, "abusivenesses": -3.0, "accept": 1.6, "acceptabilities": 1.6, "acceptability": 1.1, "acceptable": 1.3, "acceptableness": 1.3,
"acceptably": 1.5, "acceptance": 2.0, "acceptances": 1.7, "acceptant": 1.6, "acceptation": 1.3, "acceptations": 0.9, "accepted": 1.1, "accepting": 1.6, "accepts": 1.3, "accident": -2.1, "accidental": -0.3, "accidentally": -1.4, "accidents": -1.3, "accomplish": 1.8,
"accomplished": 1.9, "accomplishes": 1.7, "accusation": -1.0, "accusations": -1.3, "accuse": -0.8, "accused": -1.2, "accuses": -1.4, "accusing": -0.7, "ache": -1.6, "ached": -1.6, "aches": -1.0, "achievable": 1.3, "aching": -2.2, "acquit": 0.8, "acquits": 0.1,
"adverseness": -0.6, "adversities": -1.5, "adversity": -1.8, "affected": -0.6, "affection": 2.4, "affectional": 1.9, "affectionally": 1.5, "affectionate": 1.9, "affectionately": 2.2, "affectioned": 1.8, "affectionless": -2.0, "affections": 1.5, "afflicted": -1.5,
"affronted": 0.2, "aggravate": -2.5, "aggravated": -1.9, "aggravates": -1.9, "aggravating": -1.2, "aggress": -1.3, "aggressed": -1.4, "aggresses": -0.5, "aggressing": -0.6, "aggression": -1.2, "aggressions": -1.3, "aggressive": -0.6, "aggressively": -1.3,
"aggressiveness": -1.8, "aggressivities": -1.4, "aggressivity": -0.6, "aggressor": -0.8, "aggressors": -0.9, "aghast": -1.9, "agitate": -1.7, "agitated": -2.0, "agitatedly": -1.6, "agitates": -1.4, "agitating": -1.8, "agitation": -1.0, "agitational": -1.2,
"agitations": -1.3, "agitative": -1.3, "agitato": -0.1, "agitator": -1.4, "agitators": -2.1, "agog": 1.9, "agonise": -2.1, "agonised": -2.3, "agonises": -2.4, "agonising": -1.5, "agonize": -2.3, "agonized": -2.2, "agonizes": -2.3, "agonizing": -2.7, "agonizingly": -2.3,
"agony": -1.8, "agree": 1.5, "agreeability": 1.9, "agreeable": 1.8, "agreeableness": 1.8, "agreeablenesses": 1.3, "agreeably": 1.6, "agreed": 1.1, "agreeing": 1.4, "agreement": 2.2, "agreements": 1.1, "agrees": 0.8, "alarm": -1.4, "alarmed": -1.4, "alarming": -0.5,
"alarmingly": -2.6, "alarmism": -0.3, "alarmists": -1.1, "alarms": -1.1, "alas": -1.1, "alert": 1.2, "alienation": -1.1, "alive": 1.6, "allergic": -1.2, "allow": 0.9, "alone": -1.0, "alright": 1.0, "amaze": 2.5, "amazed": 2.2, "amazedly": 2.1, "amazement": 2.5,
"amazements": 2.2, "amazes": 2.2, "amazing": 2.8, "amazon": 0.7, "amazonite": 0.2, "amazons": -0.1, "amazonstone": 1.0, "amazonstones": 0.2, "ambitious": 2.1, "ambivalent": 0.5, "amor": 3.0, "amoral": -1.6, "amoralism": -0.7, "amoralisms": -0.7, "amoralities": -1.2,
"amorality": -1.5, "amorally": -1.0, "amoretti": 0.2, "amoretto": 0.6, "amorettos": 0.3, "amorino": 1.2, "amorist": 1.6, "amoristic": 1.0, "amorists": 0.1, "amoroso": 2.3, "amorous": 1.8, "amorously": 2.3, "amorousness": 2.0, "amorphous": -0.2, "amorphously": 0.1,
"amorphousness": 0.3, "amort": -2.1, "amortise": 0.5, "amortised": -0.2, "amortises": 0.1, "amortizable": 0.5, "amortization": 0.6, "amortizations": 0.2, "amortize": -0.1, "amortized": 0.8, "amortizes": 0.6, "amortizing": 0.8, "amusable": 0.7, "amuse": 1.7, "amused": 1.8,
"amusedly": 2.2, "amusement": 1.5, "amusements": 1.5, "amuser": 1.1, "amusers": 1.3, "amuses": 1.7, "amusia": 0.3, "amusias": -0.4, "amusing": 1.6, "amusingly": 0.8, "amusingness": 1.8, "amusive": 1.7, "anger": -2.7, "angered": -2.3, "angering": -2.2, "angerly": -1.9,
"angers": -2.3, "angrier": -2.3, "angriest": -3.1, "angrily": -1.8, "angriness": -1.7, "angry": -2.3, "anguish": -2.9, "anguished": -1.8, "anguishes": -2.1, "anguishing": -2.7, "animosity": -1.9, "annoy": -1.9, "annoyance": -1.3, "annoyances": -1.8, "annoyed": -1.6,
"annoyer": -2.2, "annoyers": -1.5, "annoying": -1.7, "annoys": -1.8, "antagonism": -1.9, "antagonisms": -1.2, "antagonist": -1.9, "antagonistic": -1.7, "antagonistically": -2.2, "antagonists": -1.7, "antagonize": -2.0, "antagonized": -1.4, "antagonizes": -0.5,
"antagonizing": -2.7, "anti": -1.3, "anticipation": 0.4, "anxieties": -0.6, "anxiety": -0.7, "anxious": -1.0, "anxiously": -0.9, "anxiousness": -1.0, "aok": 2.0, "apathetic": -1.2, "apathetically": -0.4, "apathies": -0.6, "apathy": -1.2, "apeshit": -0.9,
"apocalyptic": -3.4, "apologise": 1.6, "apologised": 0.4, "apologises": 0.8, "apologising": 0.2, "apologize": 0.4, "apologized": 1.3, "apologizes": 1.5, "apologizing": -0.3, "apology": 0.2, "appall": -2.4, "appalled": -2.0, "appalling": -1.5, "appallingly": -2.0,
"appalls": -1.9, "appease": 1.1, "appeased": 0.9, "appeases": 0.9, "appeasing": 1.0, "applaud": 2.0, "applauded": 1.5, "applauding": 2.1, "applauds": 1.4, "applause": 1.8, "appreciate": 1.7, "appreciated": 2.3, "appreciates": 2.3, "appreciating": 1.9,
"appreciation": 2.3, "appreciations": 1.7, "appreciative": 2.6, "appreciatively": 1.8, "appreciativeness": 1.6, "appreciator": 2.6, "appreciators": 1.5, "appreciatory": 1.7, "apprehensible": 1.1, "apprehensibly": -0.2, "apprehension": -2.1, "apprehensions": -0.9,
"apprehensively": -0.3, "apprehensiveness": -0.7, "approval": 2.1, "approved": 1.8, "approves": 1.7, "ardent": 2.1, "arguable": -1.0, "arguably": -1.0, "argue": -1.4, "argued": -1.5, "arguer": -1.6, "arguers": -1.4, "argues": -1.6, "arguing": -2.0, "argument": -1.5,
"argumentative": -1.5, "argumentatively": -1.8, "argumentive": -1.5, "arguments": -1.7, "arrest": -1.4, "arrested": -2.1, "arrests": -1.9, "arrogance": -2.4, "arrogances": -1.9, "arrogant": -2.2, "arrogantly": -1.8, "ashamed": -2.1, "ashamedly": -1.7, "ass": -2.5,
"assassination": -2.9, "assassinations": -2.7, "assault": -2.8, "assaulted": -2.4, "assaulting": -2.3, "assaultive": -2.8, "assaults": -2.5, "asset": 1.5, "assets": 0.7, "assfucking": -2.5, "assholes": -2.8, "assurance": 1.4, "assurances": 1.4, "assure": 1.4,
"assured": 1.5, "assuredly": 1.6, "assuredness": 1.4, "assurer": 0.9, "assurers": 1.1, "assures": 1.3, "assurgent": 1.3, "assuring": 1.6, "assuror": 0.5, "assurors": 0.7, "astonished": 1.6, "astound": 1.7, "astounded": 1.8, "astounding": 1.8, "astoundingly": 2.1,
"astounds": 2.1, "attachment": 1.2, "attachments": 1.1, "attack": -2.1, "attacked": -2.0, "attacker": -2.7, "attackers": -2.7, "attacking": -2.0, "attacks": -1.9, "attract": 1.5, "attractancy": 0.9, "attractant": 1.3, "attractants": 1.4, "attracted": 1.8,
"attracting": 2.1, "attraction": 2.0, "attractions": 1.8, "attractive": 1.9, "attractively": 2.2, "attractiveness": 1.8, "attractivenesses": 2.1, "attractor": 1.2, "attractors": 1.2, "attracts": 1.7, "audacious": 0.9, "authority": 0.3, "aversion": -1.9,
"aversions": -1.1, "aversive": -1.6, "aversively": -0.8, "avert": -0.7, "averted": -0.3, "averts": -0.4, "avid": 1.2, "avoid": -1.2, "avoidance": -1.7, "avoidances": -1.1, "avoided": -1.4, "avoider": -1.8, "avoiders": -1.4, "avoiding": -1.4, "avoids": -0.7,
"await": 0.4, "awaited": -0.1, "awaits": 0.3, "award": 2.5, "awardable": 2.4, "awarded": 1.7, "awardee": 1.8, "awardees": 1.2, "awarder": 0.9, "awarders": 1.3, "awarding": 1.9, "awards": 2.0, "awesome": 3.1, "awful": -2.0, "awkward": -0.6, "awkwardly": -1.3,
"awkwardness": -0.7, "axe": -0.4, "axed": -1.3, "backed": 0.1, "backing": 0.1, "backs": -0.2, "bad": -2.5, "badass": -0.6, "badly": -2.1, "bailout": -0.4, "bamboozle": -1.5, "bamboozled": -1.5, "bamboozles": -1.5, "ban": -2.6, "banish": -1.9, "bankrupt": -2.6,
"bankster": -2.1, "banned": -2.0, "bargain": 0.8, "barrier": -0.5, "bashful": -0.1, "bashfully": 0.2, "bashfulness": -0.8, "bastard": -2.5, "bastardies": -1.8, "bastardise": -2.1, "bastardised": -2.3, "bastardises": -2.3, "bastardising": -2.6, "bastardization": -2.4,
"bastardizations": -2.1, "bastardize": -2.4, "bastardized": -2.0, "bastardizes": -1.8, "bastardizing": -2.3, "bastardly": -2.7, "bastards": -3.0, "bastardy": -2.7, "battle": -1.6, "battled": -1.2, "battlefield": -1.6, "battlefields": -0.9, "battlefront": -1.2,
"battlefronts": -0.8, "battleground": -1.7, "battlegrounds": -0.6, "battlement": -0.4, "battlements": -0.4, "battler": -0.8, "battlers": -0.2, "battles": -1.6, "battleship": -0.1, "battleships": -0.5, "battlewagon": -0.3, "battlewagons": -0.5, "battling": -1.1,
"beaten": -1.8, "beatific": 1.8, "beating": -2.0, "beaut": 1.6, "beauteous": 2.5, "beauteously": 2.6, "beauteousness": 2.7, "beautician": 1.2, "beauticians": 0.4, "beauties": 2.4, "beautification": 1.9, "beautifications": 2.4, "beautified": 2.1, "beautifier": 1.7,
"beautifiers": 1.7, "beautifies": 1.8, "beautiful": 2.9, "beautifuler": 2.1, "beautifulest": 2.6, "beautifully": 2.7, "beautifulness": 2.6, "beautify": 2.3, "beautifying": 2.3, "beauts": 1.7, "beauty": 2.8, "belittle": -1.9, "belittled": -2.0, "beloved": 2.3,
"benefic": 1.4, "benefice": 0.4, "beneficed": 1.1, "beneficence": 2.8, "beneficences": 1.5, "beneficent": 2.3, "beneficently": 2.2, "benefices": 1.1, "beneficial": 1.9, "beneficially": 2.4, "beneficialness": 1.7, "beneficiaries": 1.8, "beneficiary": 2.1,
"beneficiate": 1.0, "beneficiation": 0.4, "benefit": 2.0, "benefits": 1.6, "benefitted": 1.7, "benefitting": 1.9, "benevolence": 1.7, "benevolences": 1.9, "benevolent": 2.7, "benevolently": 1.4, "benevolentness": 1.2, "benign": 1.3, "benignancy": 0.6,
"benignant": 2.2, "benignantly": 1.1, "benignities": 0.9, "benignity": 1.3, "benignly": 0.2, "bereave": -2.1, "bereaved": -2.1, "bereaves": -1.9, "bereaving": -1.3, "best": 3.2, "betray": -3.2, "betrayal": -2.8, "betrayed": -3.0, "betraying": -2.5,
"betrays": -2.5, "better": 1.9, "bias": -0.4, "biased": -1.1, "bitch": -2.8, "bitched": -2.6, "bitcheries": -2.3, "bitchery": -2.7, "bitches": -2.9, "bitchier": -2.0, "bitchiest": -3.0, "bitchily": -2.6, "bitchiness": -2.6, "bitching": -1.1, "bitchy": -2.3,
"bitter": -1.8, "bitterbrush": -0.2, "bitterbrushes": -0.6, "bittered": -1.8, "bitterer": -1.9, "bitterest": -2.3, "bittering": -1.2, "bitterish": -1.6, "bitterly": -2.0, "bittern": -0.2, "bitterness": -1.7, "bitterns": -0.4, "bitterroots": -0.2,
"bitters": -0.4, "bittersweet": -0.3, "bittersweetness": -0.6, "bittersweets": -0.2, "bitterweeds": -0.5, "bizarre": -1.3, "blah": -0.4, "blam": -0.2, "blamable": -1.8, "blamably": -1.8, "blame": -1.4, "blamed": -2.1, "blameful": -1.7, "blamefully": -1.6,
"blameless": 0.7, "blamelessly": 0.9, "blamelessness": 0.6, "blamer": -2.1, "blamers": -2.0, "blames": -1.7, "blameworthiness": -1.6, "blameworthy": -2.3, "blaming": -2.2, "bless": 1.8, "blessed": 2.9, "blesseder": 2.0, "blessedest": 2.8, "blessedly": 1.7,
"blessedness": 1.6, "blesser": 2.6, "blessers": 1.9, "blesses": 2.6, "blessing": 2.2, "blessings": 2.5, "blind": -1.7, "bliss": 2.7, "blissful": 2.9, "blithe": 1.2, "block": -1.9, "blockbuster": 2.9, "blocked": -1.1, "blocking": -1.6, "blocks": -0.9, "bloody": -1.9,
"blurry": -0.4, "bold": 1.6, "bolder": 1.2, "boldest": 1.6, "boldface": 0.3, "boldfaced": -0.1, "boldfaces": 0.1, "boldfacing": 0.1, "boldly": 1.5, "boldness": 1.5, "boldnesses": 0.9, "bolds": 1.3, "bomb": -2.2, "bonus": 2.5, "bonuses": 2.6, "boost": 1.7,
"boosted": 1.5, "boosting": 1.4, "boosts": 1.3, "bore": -1.0, "boreal": -0.3, "borecole": -0.2, "borecoles": -0.3, "bored": -1.1, "boredom": -1.3, "boredoms": -1.1, "boreen": 0.1, "boreens": 0.2, "boreholes": -0.2, "borer": -0.4, "borers": -1.2, "bores": -1.3,
"borescopes": -0.1, "boresome": -1.3, "boring": -1.3, "bother": -1.4, "botheration": -1.7, "botherations": -1.3, "bothered": -1.3, "bothering": -1.6, "bothers": -0.8, "bothersome": -1.3, "boycott": -1.3, "boycotted": -1.7, "boycotting": -1.7, "boycotts": -1.4,
"brainwashing": -1.5, "brave": 2.4, "braved": 1.9, "bravely": 2.3, "braver": 2.4, "braveries": 2.0, "bravery": 2.2, "braves": 1.9, "bravest": 2.3, "breathtaking": 2.0, "bribe": -0.8, "bright": 1.9, "brighten": 1.9, "brightened": 2.1, "brightener": 1.0,
"brighteners": 1.0, "brightening": 2.5, "brightens": 1.5, "brighter": 1.6, "brightest": 3.0, "brightly": 1.5, "brightness": 1.6, "brightnesses": 1.4, "brights": 0.4, "brightwork": 1.1, "brilliance": 2.9, "brilliances": 2.9, "brilliancies": 2.3, "brilliancy": 2.6,
"brilliant": 2.8, "brilliantine": 0.8, "brilliantines": 2.0, "brilliantly": 3.0, "brilliants": 1.9, "brisk": 0.6, "broke": -1.8, "broken": -2.1, "brooding": 0.1, "brutal": -3.1, "brutalise": -2.7, "brutalised": -2.9, "brutalises": -3.2, "brutalising": -2.8,
"brutalities": -2.6, "brutality": -3.0, "brutalization": -2.1, "brutalizations": -2.3, "brutalize": -2.9, "brutalized": -2.4, "brutalizes": -3.2, "brutalizing": -3.4, "brutally": -3.0, "bullied": -3.1, "bullshit": -2.8, "bully": -2.2, "bullying": -2.9,
"bummer": -1.6, "buoyant": 0.9, "burden": -1.9, "burdened": -1.7, "burdener": -1.3, "burdeners": -1.7, "burdening": -1.4, "burdens": -1.5, "burdensome": -1.8, "bwahaha": 0.4, "bwahahah": 2.5, "calm": 1.3, "calmative": 1.1, "calmatives": 0.5, "calmed": 1.6,
"calmer": 1.5, "calmest": 1.6, "calming": 1.7, "calmly": 1.3, "calmness": 1.7, "calmnesses": 1.6, "calmodulin": 0.2, "calms": 1.3, "can\'t stand": -2.0, "cancel": -1.0, "cancelled": -1.0, "cancelling": -0.8, "cancels": -0.9, "cancer": -3.4, "capable": 1.6,
"captivated": 1.6, "care": 2.2, "cared": 1.8, "carefree": 1.7, "careful": 0.6, "carefully": 0.5, "carefulness": 2.0, "careless": -1.5, "carelessly": -1.0, "carelessness": -1.4, "carelessnesses": -1.6, "cares": 2.0, "caring": 2.2, "casual": 0.8, "casually": 0.7,
"casualty": -2.4, "catastrophe": -3.4, "catastrophic": -2.2, "cautious": -0.4, "celebrate": 2.7, "celebrated": 2.7, "celebrates": 2.7, "celebrating": 2.7, "censor": -2.0, "censored": -0.6, "censors": -1.2, "certain": 1.1, "certainly": 1.4, "certainties": 0.9,
"certainty": 1.0, "chagrin": -1.9, "chagrined": -1.4, "challenge": 0.3, "challenged": -0.4, "challenger": 0.5, "challengers": 0.4, "challenges": 0.3, "challenging": 0.6, "challengingly": -0.6, "champ": 2.1, "champac": -0.2, "champagne": 1.2, "champagnes": 0.5,
"champaign": 0.2, "champaigns": 0.5, "champaks": -0.2, "champed": 1.0, "champer": -0.1, "champers": 0.5, "champerties": -0.1, "champertous": 0.3, "champerty": -0.2, "champignon": 0.4, "champignons": 0.2, "champing": 0.7, "champion": 2.9, "championed": 1.2,
"championing": 1.8, "champions": 2.4, "championship": 1.9, "championships": 2.2, "champs": 1.8, "champy": 1.0, "chance": 1.0, "chances": 0.8, "chaos": -2.7, "chaotic": -2.2, "charged": -0.8, "charges": -1.1, "charitable": 1.7, "charitableness": 1.9,
"charitablenesses": 1.6, "charitably": 1.4, "charities": 2.2, "charity": 1.8, "charm": 1.7, "charmed": 2.0, "charmer": 1.9, "charmers": 2.1, "charmeuse": 0.3, "charmeuses": 0.4, "charming": 2.8, "charminger": 1.5, "charmingest": 2.4, "charmingly": 2.2,
"charmless": -1.8, "charms": 1.9, "chastise": -2.5, "chastised": -2.2, "chastises": -1.7, "chastising": -1.7, "cheat": -2.0, "cheated": -2.3, "cheater": -2.5, "cheaters": -1.9, "cheating": -2.6, "cheats": -1.8, "cheer": 2.3, "cheered": 2.3, "cheerer": 1.7,
"cheerers": 1.8, "cheerful": 2.5, "cheerfuller": 1.9, "cheerfullest": 3.2, "cheerfully": 2.1, "cheerfulness": 2.1, "cheerier": 2.6, "cheeriest": 2.2, "cheerily": 2.5, "cheeriness": 2.5, "cheering": 2.3, "cheerio": 1.2, "cheerlead": 1.7, "cheerleader": 0.9,
"cheerleaders": 1.2, "cheerleading": 1.2, "cheerleads": 1.2, "cheerled": 1.5, "cheerless": -1.7, "cheerlessly": -0.8, "cheerlessness": -1.7, "cheerly": 2.4, "cheers": 2.1, "cheery": 2.6, "cherish": 1.6, "cherishable": 2.0, "cherished": 2.3, "cherisher": 2.2,
"cherishers": 1.9, "cherishes": 2.2, "cherishing": 2.0, "chic": 1.1, "childish": -1.2, "chilling": -0.1, "choke": -2.5, "choked": -2.1, "chokes": -2.0, "choking": -2.0, "chuckle": 1.7, "chuckled": 1.2, "chucklehead": -1.9, "chuckleheaded": -1.3,
"chuckleheads": -1.1, "chuckler": 0.8, "chucklers": 1.2, "chuckles": 1.1, "chucklesome": 1.1, "chuckling": 1.4, "chucklingly": 1.2, "clarifies": 0.9, "clarity": 1.7, "classy": 1.9, "clean": 1.7, "cleaner": 0.7, "clear": 1.6, "cleared": 0.4, "clearly": 1.7,
"clears": 0.3, "clever": 2.0, "cleverer": 2.0, "cleverest": 2.6, "cleverish": 1.0, "cleverly": 2.3, "cleverness": 2.3, "clevernesses": 1.4, "clouded": -0.2, "clueless": -1.5, "cock": -0.6, "cocksucker": -3.1, "cocksuckers": -2.6, "cocky": -0.5, "coerced": -1.5,
"collapse": -2.2, "collapsed": -1.1, "collapses": -1.2, "collapsing": -1.2, "collide": -0.3, "collides": -1.1, "colliding": -0.5, "collision": -1.5, "collisions": -1.1, "colluding": -1.2, "combat": -1.4, "combats": -0.8, "comedian": 1.6, "comedians": 1.2,
"comedic": 1.7, "comedically": 2.1, "comedienne": 0.6, "comediennes": 1.6, "comedies": 1.7, "comedo": 0.3, "comedones": -0.8, "comedown": -0.8, "comedowns": -0.9, "comedy": 1.5, "comfort": 1.5, "comfortable": 2.3, "comfortableness": 1.3, "comfortably": 1.8,
"comforted": 1.8, "comforter": 1.9, "comforters": 1.2, "comforting": 1.7, "comfortingly": 1.7, "comfortless": -1.8, "comforts": 2.1, "commend": 1.9, "commended": 1.9, "commit": 1.2, "commitment": 1.6, "commitments": 0.5, "commits": 0.1, "committed": 1.1,
"committing": 0.3, "compassion": 2.0, "compassionate": 2.2, "compassionated": 1.6, "compassionately": 1.7, "compassionateness": 0.9, "compassionates": 1.6, "compassionating": 1.6, "compassionless": -2.6, "compelled": 0.2, "compelling": 0.9, "competent": 1.3,
"competitive": 0.7, "complacent": -0.3, "complain": -1.5, "complainant": -0.7, "complainants": -1.1, "complained": -1.7, "complainer": -1.8, "complainers": -1.3, "complaining": -0.8, "complainingly": -1.7, "complains": -1.6, "complaint": -1.2, "complaints": -1.7,
"compliment": 2.1, "complimentarily": 1.7, "complimentary": 1.9, "complimented": 1.8, "complimenting": 2.3, "compliments": 1.7, "comprehensive": 1.0, "conciliate": 1.0, "conciliated": 1.1, "conciliates": 1.1, "conciliating": 1.3, "condemn": -1.6, "condemnation": -2.8,
"condemned": -1.9, "condemns": -2.3, "confidence": 2.3, "confident": 2.2, "confidently": 2.1, "conflict": -1.3, "conflicting": -1.7, "conflictive": -1.8, "conflicts": -1.6, "confront": -0.7, "confrontation": -1.3, "confrontational": -1.6,
"confrontationist": -1.0, "confrontationists": -1.2, "confrontations": -1.5, "confronted": -0.8, "confronter": -0.3, "confronters": -1.3, "confronting": -0.6, "confronts": -0.9, "confuse": -0.9, "confused": -1.3, "confusedly": -0.6, "confusedness": -1.5,
"confuses": -1.3, "confusing": -0.9, "confusingly": -1.4, "confusion": -1.2, "confusional": -1.2, "confusions": -0.9, "congrats": 2.4, "congratulate": 2.2, "congratulation": 2.9, "congratulations": 2.9, "consent": 0.9, "consents": 1.0, "considerate": 1.9,
"consolable": 1.1, "conspiracy": -2.4, "constrained": -0.4, "contagion": -2.0, "contagions": -1.5, "contagious": -1.4, "contempt": -2.8, "contemptibilities": -2.0, "contemptibility": -0.9, "contemptible": -1.6, "contemptibleness": -1.9, "contemptibly": -1.4,
"contempts": -1.0, "contemptuous": -2.2, "contemptuously": -2.4, "contemptuousness": -1.1, "contend": 0.2, "contender": 0.5, "contented": 1.4, "contentedly": 1.9, "contentedness": 1.4, "contentious": -1.2, "contentment": 1.5, "contestable": 0.6, "contradict": -1.3,
"contradictory": -1.4, "contradicts": -1.4, "controversial": -0.8, "controversially": -1.1, "convince": 1.0, "convinced": 1.7, "convincer": 0.6, "convincers": 0.3, "convinces": 0.7, "convincing": 1.7, "convincingly": 1.6, "convincingness": 0.7, "convivial": 1.2,
"cool": 1.3, "cornered": -1.1, "corpse": -2.7, "costly": -0.4, "courage": 2.2, "courageous": 2.4, "courageously": 2.3, "courageousness": 2.1, "courteous": 2.3, "courtesy": 1.5, "cover-up": -1.2, "coward": -2.0, "cowardly": -1.6, "coziness": 1.5, "cramp": -0.8,
"crap": -1.6, "crappy": -2.6, "crash": -1.7, "craze": -0.6, "crazed": -0.5, "crazes": 0.2, "crazier": -0.1, "craziest": -0.2, "crazily": -1.5, "craziness": -1.6, "crazinesses": -1.0, "crazing": -0.5, "crazy": -1.4, "crazyweed": 0.8, "create": 1.1,
"created": 1.0, "creates": 1.1, "creatin": 0.1, "creatine": 0.2, "creating": 1.2, "creatinine": 0.4, "creation": 1.1, "creationism": 0.7, "creationisms": 1.1, "creationist": 0.8, "creationists": 0.5, "creations": 1.6, "creative": 1.9, "creatively": 1.5,
"creativeness": 1.8, "creativities": 1.7, "creativity": 1.6, "credit": 1.6, "creditabilities": 1.4, "creditability": 1.9, "creditable": 1.8, "creditableness": 1.2, "creditably": 1.7, "credited": 1.5, "crediting": 0.6, "creditor": -0.1, "credits": 1.5,
"creditworthiness": 1.9, "creditworthy": 2.4, "crestfallen": -2.5, "cried": -1.6, "cries": -1.7, "crime": -2.5, "criminal": -2.4, "criminals": -2.7, "crisis": -3.1, "critic": -1.1, "critical": -1.3, "criticise": -1.9, "criticised": -1.8, "criticises": -1.3,
"criticising": -1.7, "criticism": -1.9, "criticisms": -0.9, "criticizable": -1.0, "criticize": -1.6, "criticized": -1.5, "criticizer": -1.5, "criticizers": -1.6, "criticizes": -1.4, "criticizing": -1.5, "critics": -1.2, "crude": -2.7, "crudely": -1.2,
"crudeness": -2.0, "crudenesses": -2.0, "cruder": -2.0, "crudes": -1.1, "crudest": -2.4, "cruel": -2.8, "crueler": -2.3, "cruelest": -2.6, "crueller": -2.4, "cruellest": -2.9, "cruelly": -2.8, "cruelness": -2.9, "cruelties": -2.3, "cruelty": -2.9,
"crush": -0.6, "crushed": -1.8, "crushes": -1.9, "crushing": -1.5, "cry": -2.1, "crying": -2.1, "cunt": -2.2, "cunts": -2.9, "curious": 1.3, "curse": -2.5, "cut": -1.1, "cute": 2.0, "cutely": 1.3, "cuteness": 2.3, "cutenesses": 1.9, "cuter": 2.3,
"cutes": 1.8, "cutesie": 1.0, "cutesier": 1.5, "cutesiest": 2.2, "cutest": 2.8, "cutesy": 2.1, "cutey": 2.1, "cuteys": 1.5, "cutie": 1.5, "cutiepie": 2.0, "cuties": 2.2, "cuts": -1.2, "cutting": -0.5, "cynic": -1.4, "cynical": -1.6, "cynically": -1.3,
"cynicism": -1.7, "cynicisms": -1.7, "cynics": -0.3, "d-:": 1.6, "damage": -2.2, "damaged": -1.9, "damager": -1.9, "damagers": -2.0, "damages": -1.9, "damaging": -2.3, "damagingly": -2.0, "damn": -1.7, "damnable": -1.7, "damnableness": -1.8, "damnably": -1.7,
"damnation": -2.6, "damnations": -1.4, "damnatory": -2.6, "damned": -1.6, "damnedest": -0.5, "damnified": -2.8, "damnifies": -1.8, "damnify": -2.2, "damnifying": -2.4, "damning": -1.4, "damningly": -2.0, "damnit": -2.4, "damns": -2.2, "danger": -2.4,
"dangered": -2.4, "dangering": -2.5, "dangerous": -2.1, "dangerously": -2.0, "dangerousness": -2.0, "dangers": -2.2, "daredevil": 0.5, "daring": 1.5, "daringly": 2.1, "daringness": 1.4, "darings": 0.4, "darkest": -2.2, "darkness": -1.0, "darling": 2.8,
"darlingly": 1.6, "darlingness": 2.3, "darlings": 2.2, "dauntless": 2.3, "daze": -0.7, "dazed": -0.7, "dazedly": -0.4, "dazedness": -0.5, "dazes": -0.3, "dead": -3.3, "deadlock": -1.4, "deafening": -1.2, "dear": 1.6, "dearer": 1.9, "dearest": 2.6,
"dearie": 2.2, "dearies": 1.0, "dearly": 1.8, "dearness": 2.0, "dears": 1.9, "dearth": -2.3, "dearths": -0.9, "deary": 1.9, "death": -2.9, "debonair": 0.8, "debt": -1.5, "decay": -1.7, "decayed": -1.6, "decayer": -1.6, "decayers": -1.6, "decaying": -1.7,
"decays": -1.7, "deceit": -2.0, "deceitful": -1.9, "deceive": -1.7, "deceived": -1.9, "deceives": -1.6, "deceiving": -1.4, "deception": -1.9, "decisive": 0.9, "dedicated": 2.0, "defeat": -2.0, "defeated": -2.1, "defeater": -1.4, "defeaters": -0.9,
"defeating": -1.6, "defeatism": -1.3, "defeatist": -1.7, "defeatists": -2.1, "defeats": -1.3, "defeature": -1.9, "defeatures": -1.5, "defect": -1.4, "defected": -1.7, "defecting": -1.8, "defection": -1.4, "defections": -1.5, "defective": -1.9, "defectively": -2.1,
"defectiveness": -1.8, "defectives": -1.8, "defector": -1.9, "defectors": -1.3, "defects": -1.7, "defence": 0.4, "defenceman": 0.4, "defencemen": 0.6, "defences": -0.2, "defender": 0.4, "defenders": 0.3, "defense": 0.5, "defenseless": -1.4, "defenselessly": -1.1,
"defenselessness": -1.3, "defenseman": 0.1, "defensemen": -0.4, "defenses": 0.7, "defensibility": 0.4, "defensible": 0.8, "defensibly": 0.1, "defensive": 0.1, "defensively": -0.6, "defensiveness": -0.4, "defensives": -0.3, "defer": -1.2, "deferring": -0.7,
"degrading": -2.8, "degradingly": -2.7, "dehumanize": -1.8, "dehumanized": -1.9, "dehumanizes": -1.5, "dehumanizing": -2.4, "deject": -2.2}```
```from QuantConnect.Data.Custom.Tiingo import *

from sediment import WordScore
from explore import Runner
from agent import TD3

import pandas as pd
import numpy as np

import math

# NOTE(review): indentation appears to have been stripped from this file —
# class and method bodies sit at column 0 and will not parse as-is; restore
# indentation before running.
class TwinDelayedDDPG(QCAlgorithm):
'''Continuous Twin Delayed DDPG model '''

def Initialize(self):
'''Configure symbols, news feeds, the gym-style environment and the TD3 agent.'''

# settings
self.FeatureWindow = 10  # observation window length per feature
self.LookBack = 100 * 2  # daily bars pulled when (re)training
self.Test = 20 * 2  # bars reserved for the test split
self.LastDataNum = -1  # sentinel: no training has happened yet (see TrainTimeCheck)

live = False  # gates the scheduled re-training setup at the bottom

self.SetStartDate(2019, 1, 1)
#self.SetEndDate(2019, 10, 31)
#self.SetBrokerageModel(BrokerageName.Alpaca, AccountType.Cash)

self.symbolDataBySymbol = {}  # Symbol -> SymbolData helper
self.SymbolList = ['SPY','AAPL','NVDA','AMZN','MSFT','GOOGL','TSLA']#,'MKTX','ABMD','ALGN','AVGO','ULTA','TTWO','FTNT','MA','TGT','TSN']

self.SecurityList = []  # equity Symbol objects
self.NewsList = []  # Tiingo news Symbol objects

for symbol in self.SymbolList:
# NOTE(review): `security` is undefined in this loop — a line such as
# `security = self.AddEquity(symbol, Resolution.Daily)` was presumably
# lost; confirm against the original project before running.
self.SecurityList.append(security.Symbol)
self.symbolDataBySymbol[security.Symbol] = SymbolData(self, security.Symbol, self.FeatureWindow,  Resolution.Daily)

for symbol in self.SecurityList:
# NOTE(review): `news` is undefined here — likely a lost
# `news = self.AddData(TiingoNews, symbol)` subscription.
self.NewsList.append(news.Symbol)

self.SetBenchmark("SPY")

self.WS = WordScore()  # sentiment scorer (from sediment module)

# NOTE(review): `env` is undefined — construction of the trading
# environment (the class exposing reset()/observation_space/action_space)
# appears to have been lost; verify against the original source.
env.reset()

self.environment = env
self.observationRun = False  # has the replay-buffer warm-up run yet?
self.modelIsTraining = False  # blocks live trading while training

state_size = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
seed = 0

sizes = (state_size, action_dim, max_action, seed)

self.runnerObj = Runner(self, n_episodes=170, batch_size=5, gamma=0.99, tau=0.005, noise=0.2,\
noise_clip=0.5, explore_noise=0.1, policy_frequency=2, sizes=sizes)

self.AI_TradeAgent = TD3(self, state_dim=state_size, action_dim=action_dim, max_action=max_action, seed=seed)

if live:

# Set TrainingMethod to be executed immediately
self.Train(self.TrainingMethod)

# Set TrainingMethod to be executed at 6:00 am every Sunday
self.Train(self.DateRules.Every(DayOfWeek.Sunday), self.TimeRules.At(6, 0), self.TrainingMethod)

def TrainTimeCheck(self):
    '''Decide whether the TD3 model should be (re)trained now.

    Returns:
        bool: True exactly once — on the first call, when the -1 sentinel
        set in Initialize is still in place — and False afterwards.

    The commented-out condition below documents the intended periodic
    trigger (retrain on every even month); only the sentinel branch is
    currently active, so training happens a single time per run.
    '''
    # Month number drives the trigger; switch to "%V" (week) or "%e" (day)
    # for a finer-grained re-training cadence.
    monthNum = self.Time.strftime("%m")

    # trigger logic
    if self.LastDataNum == -1:  # self.LastDataNum != monthNum and int(monthNum) % 2 == 0 or
        # Remember which month we trained in, then signal the caller.
        self.LastDataNum = monthNum
        return True
    return False

def HistoricalData(self, lookBack=100):
'''Build a (symbol, feature) multi-indexed DataFrame of close, volume and
daily news-sentiment score over roughly the last `lookBack` daily bars.

NOTE(review): `maxValue` is only bound inside the price loop — if
`historyData` is empty (or SecurityList is empty) the news loop raises
NameError. `timedelta` is presumably provided by the QuantConnect star
imports at the top of the file — confirm.
'''

historyData = self.History(self.SecurityList, lookBack, Resolution.Daily)
historyNews = self.History(TiingoNews, self.NewsList, lookBack, Resolution.Daily)
historyData.dropna(inplace=True)
historyNews.dropna(inplace=True)

pricesX = {}  # symbol string -> close series
volumeX = {}  # symbol string -> volume series
newsX = {}  # symbol string -> daily sentiment score series

for symbol in self.SecurityList:
if not historyData.empty:
# [:-1] drops the most recent (possibly partial) bar
pricesX[symbol.Value] = list(historyData.loc[str(symbol.Value)]['close'])[:-1]
volumeX[symbol.Value] = list(historyData.loc[str(symbol.Value)]['volume'])[:-1]

# what is the len of data?
maxValue = len(pricesX[symbol.Value])

for symbol in self.NewsList:
if not historyNews.empty:
# score each article description, then aggregate scores per calendar day
df = historyNews.loc[symbol].apply(lambda row : self.WS.score(row['description']), axis = 1)
df.index = pd.to_datetime(df.index, utc=True)
df = df.resample('1D').sum().fillna(0)
time = self.Time
# full calendar index so days without any news become zero-score rows
days = pd.date_range(time - timedelta(lookBack), time, freq='D')
x = np.zeros(lookBack+1)
x[:] = np.nan
data = pd.DataFrame({'score':x},index=days)
data['score'] = df
data.fillna(0,inplace=True)
# truncate news to the same length as the price/volume series
newsX[symbol.Value] = list(data['score'])[-maxValue:]

dictOfDict = {'close':pricesX, 'volume':volumeX, 'score':newsX}

dictOfDf = {k: pd.DataFrame(v) for k,v in dictOfDict.items()}

# outer column level = feature name, inner level = symbol (swapped below)
df = pd.concat(dictOfDf, axis=1)

v1 = pd.Categorical(df.columns.get_level_values(0),
categories=['close', 'volume', 'score'],
ordered=True)

v2 = pd.Categorical(df.columns.get_level_values(1),
categories=self.SymbolList,
ordered=True)

# rebuild columns as (symbol, feature) with a deterministic ordering
df.columns = pd.MultiIndex.from_arrays([v2,v1])

return df.sort_index(axis=1, level=[0, 1])

def TrainingMethod(self):
'''Scheduled entry point: pull history and (re)train the TD3 agent.

NOTE(review): `trainEnv` and `testEnv` are undefined in this method —
the environment construction hinted at by the comment below appears to
have been lost (which is also why `df` is unused), and
`train(testEnv, testEnv)` likely intended `train(trainEnv, testEnv)`.
Restore from the original project before running.
'''

# check to see if we should train the model
train = self.TrainTimeCheck()

if not train:
return

# get historical data
x = self.LookBack
df = self.HistoricalData(x)

# create environments for training and testing

# run observations only once to fill replay buffer with samples
if not self.observationRun:
self.runnerObj.observe(trainEnv, 1000)
self.observationRun = True

# set model to train data
self.modelIsTraining = True
self.runnerObj.train(testEnv,testEnv)
self.modelIsTraining = False

def OnOrderEvent(self, orderEvent):
    """Log every order event, prefixed with the algorithm time, for debugging."""
    event_text = orderEvent.ToString()
    message = "{} {}".format(self.Time, event_text)
    self.Debug(message)

def OnEndOfAlgorithm(self):
    """Persist the replay buffer to disk and log the final portfolio state."""
    # Save Replay Buffer!
    self.runnerObj.replay_buffer.save(name='ReplayBuff')

    now = self.Time
    portfolio = self.Portfolio
    self.Log("{} - TotalPortfolioValue: {}".format(now, portfolio.TotalPortfolioValue))
    self.Log("{} - CashBook: {}".format(now, portfolio.CashBook))

class SymbolData:
'''Per-symbol state: rolling close/volume/news windows plus the live
trading logic driven by the consolidated bar handler.'''

def __init__(self, algo, symbol, window, resolution):
'''Store references and initialize the rolling feature windows.

NOTE(review): `self.timeConsolidator` is read before it is ever
assigned (AttributeError at runtime) — the consolidator creation and
SubscriptionManager registration were presumably lost. Likewise
`self.newsAsset`, used in update(), is never set here; see the empty
"tiingo news" comment below. Restore both from the original project.
'''

self.algo = algo  # owning QCAlgorithm instance
self.symbol = symbol
self.window = window  # length of each rolling feature window
self.resolution = resolution

# add each symbol to consolidator
self.timeConsolidator.DataConsolidated += self.TimeConsolidator

# add each symbol to tiingo news

# temp info used to check changes before trades
self.weight_temp = 0

# we will store the historical window here, and keep it a fixed length in update
self.history_close = []
self.history_volume = []
self.history_news = []

# how much of one asset can we buy/short
self.max_pos = 1 / len(algo.SymbolList)  # equal-weight cap per asset
self.max_short_pos = 0.0  # floor at zero: shorting disabled

def update(self, close, volume, symbol):
'''Update symbols and news with historical data or live data.

Appends the newest close/volume/news score to the rolling windows,
seeding each window from History on first use and then keeping a fixed
length of `self.window` (grow while warming up, then drop the oldest).
'''

# update history, retain length
if len(self.history_close)==0:
# first call: seed the window from ~20 calendar days of history
hist_df = self.algo.History([self.symbol], timedelta(days=20), self.resolution)

# Case where no data to return for this asset. New asset?
if 'close' not in hist_df.columns:
return

hist_df.dropna(inplace=True)
hist_df.reset_index(level=[0,1],inplace=True)
hist_df.set_index('time', inplace=True)
hist_df.dropna(inplace=True)

# store the target time series
self.history_close = hist_df.close.values[-self.window:]
self.history_volume = hist_df.volume.values[-self.window:]

if len(self.history_close) < self.window:
# still warming up: let the window grow
self.history_close = np.append(self.history_close, close)
self.history_volume = np.append(self.history_volume, volume)

else:
# window full: append the newest value and drop the oldest
self.history_close = np.append(self.history_close, close)[1:]
self.history_volume = np.append(self.history_volume, volume)[1:]

if len(self.history_news)==0:
# NOTE(review): self.newsAsset is never assigned in __init__ —
# confirm the Tiingo subscription was meant to be stored there.
hist_df = self.algo.History(TiingoNews, self.newsAsset.Symbol, timedelta(days=20), Resolution.Daily)
hist_df.dropna(inplace=True)
if not hist_df.empty:
# score each article description and aggregate scores per calendar day
df = hist_df.loc[self.newsAsset.Symbol].apply(lambda row : self.algo.WS.score(row['description']), axis = 1)
df.index = pd.to_datetime(df.index, utc=True)
df = df.resample('1D').sum().fillna(0)
self.history_news = df.values[-self.window:]
else:
# no news history: start with a zero-filled window
self.history_news = np.zeros(self.window)

else:
# subsequent calls: pull just the latest day of news
hist_df = self.algo.History(TiingoNews, self.newsAsset.Symbol, timedelta(days=1), Resolution.Daily)
hist_df.dropna(inplace=True)
if not hist_df.empty:
df = hist_df.loc[self.newsAsset.Symbol].apply(lambda row : self.algo.WS.score(row['description'] if \
'description' in row else np.nan), axis = 1)

df.index = pd.to_datetime(df.index, utc=True)
a = df.resample('1D').sum().fillna(0)
self.history_news = np.append(self.history_news, np.array(a))[1:]
else:
# no fresh news: pad with zero to keep the three windows aligned
self.history_news = np.append(self.history_news, 0)[1:]

def TimeConsolidator(self, sender, bar):
'''Live data will be streamed here (consolidated-bar handler).

Updates the rolling windows with the new bar, builds an observation via
the environment, and nudges the holding weight according to the agent's
action, clipped to [max_short_pos, max_pos].

NOTE(review): `action` is undefined below — the agent call that belongs
under "get action from agent" (e.g.
`action = self.algo.AI_TradeAgent.select_action(np.array(new_obs))`)
appears to have been lost; restore before running.
'''

# return if model is training
if self.algo.modelIsTraining:
self.algo.Debug("Retun, model still training")
return

symbol = bar.Symbol
price = bar.Close
vol = bar.Volume

# update data arrays with new data
self.update(price, vol, symbol)

# Check current portfolio for changes
if self.algo.Securities[symbol].Invested:
currentweight = (self.algo.Portfolio[symbol].Quantity * price) /self.algo.Portfolio.TotalPortfolioValue
else:
currentweight = 0.0

# set ratio currently invested for this symbol
weight = currentweight

# re-use code in env and get observations with new data
new_obs = self.algo.environment.next_observation(close_window = self.history_close, \
volume_window = self.history_volume, news_window=self.history_news)

# get action from agent

# View Action
#self.algo.Debug("{} {} -> {}".format(symbol, new_obs, action))

if action > 0.05:
# buy signal: scale weight up by the normalized action (30% step)
weight += np.clip(self.algo.environment.normalize(abs(float(action))),0,1) * 0.3
weight = np.clip(round(weight,4),self.max_short_pos,self.max_pos)

# only trade if the target weight actually increased
if weight > self.weight_temp:
self.algo.SetHoldings(symbol, weight, False)
self.weight_temp = weight

elif action < -0.05:
# sell signal: scale weight down symmetrically
weight += -(np.clip(self.algo.environment.normalize(abs(float(action))),0,1)) * 0.3
weight = np.clip(round(weight,4),self.max_short_pos,self.max_pos)

# only trade if the target weight actually decreased
if weight < self.weight_temp:
self.algo.SetHoldings(symbol, weight, False)
self.weight_temp = weight
else:
pass

else:
# action within the dead-band [-0.05, 0.05]: hold current position
pass```