###########################################################################
## Team: The Diggers (D.A.Tsenov Academy of Economics, Svishtov, Bulgaria)
## Students:
## Ivalina Foteva (Marketing)
## Beatris Ljubenova (Finance)
## Ivan Dragomirov (Marketing)
## Mentors:
## Angelin Lalev, PhD (Dept. of Business Informatics)
## Atanaska Reshetkova, PhD (Dept. of Marketing)
## Kostadin Bashev (Dept. of Marketing)
##
## The code below may be used under Creative Commons CC-BY license.
##
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.models import Sequential
from keras.models import load_model
from keras.layers import LSTM
from keras.layers import Dense
import matplotlib.pyplot as plt
# Subroutines
# Computes Mean Absolute Percentage Error
def compute_mape(test, prediction):
    # ravel both inputs so a (n, 1) Keras prediction does not broadcast
    # against a (n,) test vector into a (n, n) matrix
    return np.mean(np.abs((np.ravel(test) - np.ravel(prediction)) / np.ravel(test))) * 100
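# In formula form: MAPE = (100/n) * sum(|test_i - prediction_i| / test_i);
# it is undefined if any true value is exactly zero, which never happens
# with our price series.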
# Computes Directional Symmetry
def compute_ds(test, prediction):
    oldtest = test[:-1]
    newtest = test[1:]
    oldprediction = prediction[:-1]
    newprediction = prediction[1:]
    tmp1 = newtest - oldtest
    tmp2 = newprediction - oldprediction
    # the transpose lets a (n, 1) prediction broadcast element-wise
    # against the (n,) vector of test differences
    tmp = np.multiply(tmp1, tmp2.T)
    percent = ((np.where(tmp > 0)[0].shape[0]) * 100) / (oldtest.shape[0])
    return percent
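# A quick toy sanity check of DS (hypothetical numbers, not from the
# dataset): test = [1, 2, 3, 2] moves up, up, down, while
# prediction = [1, 2, 2.5, 2.7] moves up, up, up, so two of the three
# directions match and the call below should return ~66.67:
#compute_ds(np.array([1., 2., 3., 2.]), np.array([1., 2., 2.5, 2.7]))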
# Before we start, something about cheating.
# The models won't be tested on datapoints in the future, so if we could
# get our hands on some data that is more recent than the last point in our
# dataset, we could cheat by including it in our training set covertly.
# This would result in overfitting our model to extremes, but since we
# would test with the same data we train on, the testing process would miss
# it and we would appear to have achieved excellent results in
# predicting the "future".
# Then, to cover it all up, we could blame the non-reproducibility of our
# training results on the random seed. For example, we could seed
# with the current system time in milliseconds, which differs greatly
# each time the program starts.
# We refuse to do this and seed our random generators with pre-determined
# numbers, which should guarantee total reproducibility of our results,
# including training the model.
# Note: We train on CPU and we are not sure if the rounding "mechanics"
# works the same way on the CPU as on the GPU, so we don't know if training
# our net on a GPU would change the end results.
# Keras is supposed to use the numpy pseudo-random generator
# and tensorflow has its own.
# We seed both.
np.random.seed(1)
tf.set_random_seed(2)
data = pd.read_csv('/home/datacrunch/Downloads/matrix_one_file/price_data.csv')
# We start with the Bitcoin model.
# The preliminary analysis indicates that:
# 1. The price of Bitcoin is heavily correlated with the prices of all major
# cryptocurrencies. We are making the educated guess that the Bitcoin price
# influences the prices of the other cryptocurrencies rather than the other
# way around.
# This means that the lesser-known cryptocurrencies would just add noise to
# our model. Since we will not trade them, we simply do not use the provided
# data about their prices, volumes, etc.
#
# 2. In the short term, the price of Bitcoin is not influenced significantly
# by the daily trade volumes or the total number of emitted bitcoins,
# so we won't use this data in the model.
#
# Based on these observations, we conclude that the only semi-reliable
# predictor for the current prices, which is included in the dataset,
# is the data on the historical prices of each cryptocurrency and especially
# the data on Bitcoin prices.
#
# We will approach the problem the "Bollinger way", choosing to look 20 or
# so steps back and feeding them into an LSTM neural network. The number is
# chosen based on conventional wisdom about how financial markets operate.
# We actually tried different numbers of periods to look back. Our results
# indicated that there is no significant difference in the performance
# of the neural network if we look as far as 40 periods back.
# When trying shorter periods (as short as 3), we saw a small increase in
# success at predicting the direction of the next move of the price, at the
# cost of a worse prediction of the scale of the movement itself.
# Which property of the model - the ability to predict the direction or the
# scale of the movement - is more important depends on the particular
# trading strategy chosen. The assumptions of the competition - no short
# selling, no futures trading, autonomous AI-supported trading decisions -
# imply that the direction is important enough to warrant shorter
# look-back periods.
# We split the data at row 13251 to get the train and test sets
splitpoint = 13251
# Bitcoin only.
bitcoin = data['1442'] # Bitcoin id in the dataset
# And we need the time column for neat data visualisations
# Too bad that in the dataset, the time column sometimes comes with
# milliseconds in different formats, so the column must be cleaned,
# if only to look nice on chart ticks
time = data['time'].str[0:16]
# We look *lookback* periods back in time
lookback = 20
# First we fill the missing data
bitcoin = bitcoin.fillna(method='pad')
# Then we scale and center the data
scalefactor = bitcoin.max()
bitcoin = bitcoin / scalefactor
bitcoin = bitcoin - 0.5
# Make the timeframes. Each timeframe contains price data about
# lookback periods back in time.
# We also switch to numpy arrays which we will need to feed data
# into the neural network.
timeframes = np.array(bitcoin[0:lookback])
for c in range(1, bitcoin.count()-lookback+1):
    timeframes = np.vstack((timeframes, np.array(bitcoin[c:c+lookback])))
# We don't need the last timeframe, because we don't have y to test
# and learn against...
timeframes = timeframes[:-1]
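# Side note: growing the array with np.vstack on every iteration is
# quadratic in the number of rows. An equivalent construction (our sketch,
# producing the same (n - lookback, lookback) matrix) would be:
#timeframes = np.array([bitcoin[c:c+lookback]
#                       for c in range(bitcoin.count() - lookback)])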
# Split the dataset into traindata and testdata
(trainX, testX) = np.split(timeframes, [splitpoint])
(trainY, testY) = np.split(np.array(bitcoin[lookback:]), [splitpoint])
# And shape it as proper input to LSTM layer of Keras
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
# Init the model
model = Sequential()
# LSTM layer
# We experimented with more layers and more neurons in each layer.
# Our experiments seem to indicate that no significant improvement
# can be made this way. Maybe the historical data just does not contain
# enough information that can be extracted in this fashion?
model.add(LSTM(32, input_shape=(lookback,1), return_sequences = False))
# More LSTM layers are added in this fashion:
#model.add(LSTM(32, return_sequences = True))
# Please note that LSTM layers return data with one dimension more than
# the other types of layers. So when we stack such layers, all layers
# except the last one must be called with return_sequences = True
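# For example, a two-layer variant we experimented with (kept here only as
# a sketch, not the model we actually run below) would read:
#model_deep = Sequential()
#model_deep.add(LSTM(32, input_shape=(lookback,1), return_sequences = True))
#model_deep.add(LSTM(32, return_sequences = False))
#model_deep.add(Dense(1))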
#
# Output layer
model.add(Dense(1))
# Compile the model
# ADAM optimizer seems to work better than SGD with all
# tested scenarios about lookback and depth of our network.
model.compile(loss='mean_squared_error', optimizer='adam')
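# For reference, the SGD alternative would be compiled like this (a sketch
# of one tested scenario; optimizer settings beyond the Keras defaults are
# an assumption, we do not list them here):
#model.compile(loss='mean_squared_error', optimizer='sgd')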
# If we need (during development) to load the model and train it a little
# more to determine the number of epochs needed, we uncomment this:
#model = load_model('bitcoin.h5')
# This is the time-consuming step - train the model
model.fit(trainX, trainY, epochs=60, verbose=2)
# We save the model at development time to avoid having to train it
# repeatedly while hammering the code that follows.
model.save('bitcoin.h5')
# When we have a saved model and need to fix the code below, we comment out
# the previous lines from "model = Sequential()" onward and load the trained
# model from disk instead.
#model = load_model('bitcoin.h5')
# The neural network makes its predictions about *one* period of time
# in the future for each timeframe we feed in.
predictY = model.predict(testX, verbose = 1)
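# If we ever needed to look more than one period ahead, a hypothetical
# (untested here) recursive variant feeds each prediction back into the
# input window, e.g. for 5 periods:
#frame = testX[-1].copy()
#for _ in range(5):
#    nxt = model.predict(frame.reshape(1, lookback, 1))
#    frame = np.vstack((frame[1:], nxt))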
# "De-scale", "De-center" our data
predictY = (predictY + 0.5) * scalefactor
testY = (testY + 0.5) * scalefactor
# Some data visualisations
# Compute MAPE / DS
mape = compute_mape(testY, predictY)
print("MAPE is %f percent"%mape)
ds = compute_ds(testY, predictY)
print("DS is %s percent"%ds)
# predictY and testY on all validation points
# With this many datapoints (~2000), the scale will hide the differences,
# so things will look much better than they actually are.
plt.rc('xtick', labelsize=8)
plt.rc('axes', labelsize=14)
plt.rc('figure', titlesize=14)
plt.plot(testY[:-30], label='True price')
plt.plot(predictY[:-30], label='Predicted price\n/1 period in the future/')
plt.title('Predicted and real values of Bitcoin prices on the test set\n ~2000 points from 12.03.2018 until 23.03.2018\nTeam: The Diggers /Tsenov Academy of Economics/\n \nMAPE: %.2f%% DS: %.2f%%'%(mape,ds))
plt.legend()
plt.xlabel('Time')
plt.ylabel('BTC/USD')
plt.tight_layout()
# Save to disk, so we can include this in the team article or something.
plt.savefig('bitcoin_predictvstest_2000.png', dpi = 1200)
plt.show()
# So plot last 100 datapoints of testY and PredictY instead
plt.rc('xtick', labelsize=8)
plt.rc('axes', labelsize=14)
plt.rc('figure', titlesize=14)
plt.plot(testY[-130:-30], label='True price')
plt.plot(predictY[-130:-30], label='Predicted price\n/1 period in the future/')
plt.title('Predicted and real values of Bitcoin prices on the test set\n 100 points from 2018-03-21 12:15:00 until 2018-03-23 11:15:00\nTeam: The Diggers /Tsenov Academy of Economics/\n \nMAPE: %.2f%% DS: %.2f%%'%(mape,ds))
plt.legend()
plt.xlabel('Time')
plt.ylabel('BTC/USD')
plt.tight_layout()
plt.savefig('bitcoin_predictvstest_100.png', dpi = 1200)
plt.show()
# Last 10 datapoints of TestY and PredictY
plt.rc('xtick', labelsize=8)
plt.rc('axes', labelsize=14)
plt.rc('figure', titlesize=14)
plt.plot(testY[-40:-30], label='True price')
plt.plot(predictY[-40:-30], label='Predicted price\n/1 period in the future/')
plt.title('Predicted and real values of Bitcoin prices on 10 points in the test set\nTeam: The Diggers /Tsenov Academy of Economics/\n \nMAPE: %.2f%% DS: %.2f%%'%(mape,ds))
plt.grid()
plt.legend()
plt.xlabel('Time')
plt.ylabel('BTC/USD')
plt.xticks(range(10), time[-40:-30], rotation='vertical')
plt.tight_layout()
plt.savefig('bitcoin_predictvstest_10.png', dpi = 1200)
plt.show()
# We export testY and predictY as CSV so we can share them with
# teammates who are trying ARIMA models. This way we can cross-check
# the MAPE and DS of each model.
p = pd.DataFrame(data=np.vstack((testY, predictY[:,0])).T)
p.columns = ['testY', 'predictY']
p.to_csv('bitcoinmape_ds_bitcoin.csv')
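# A teammate can then re-check our numbers from the CSV with something like
# this (hypothetical snippet, using the column names written above):
#q = pd.read_csv('bitcoinmape_ds_bitcoin.csv')
#print(compute_mape(q['testY'].values, q['predictY'].values))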
etherium.py
###########################################################################
## Team: The Diggers (D.A.Tsenov Academy of Economics, Svishtov, Bulgaria)
## Students:
## Ivalina Foteva (Marketing)
## Beatris Ljubenova (Finance)
## Ivan Dragomirov (Marketing)
## Mentors:
## Atanaska Reshetkova, PhD (Dept. of Marketing)
## Kostadin Bashev (Dept. of Marketing)
## Angelin Lalev, PhD (Dept. of Business Informatics)
##
## The code below may be used under Creative Commons CC-BY license.
##
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.models import Sequential
from keras.models import load_model
from keras.layers import LSTM
from keras.layers import Dense
import matplotlib.pyplot as plt
# Subroutines
# Computes Mean Absolute Percentage Error
def compute_mape(test, prediction):
    # ravel both inputs so a (n, 1) Keras prediction does not broadcast
    # against a (n,) test vector into a (n, n) matrix
    return np.mean(np.abs((np.ravel(test) - np.ravel(prediction)) / np.ravel(test))) * 100
# Computes Directional Symmetry
def compute_ds(test, prediction):
    oldtest = test[:-1]
    newtest = test[1:]
    oldprediction = prediction[:-1]
    newprediction = prediction[1:]
    tmp1 = newtest - oldtest
    tmp2 = newprediction - oldprediction
    # the transpose lets a (n, 1) prediction broadcast element-wise
    # against the (n,) vector of test differences
    tmp = np.multiply(tmp1, tmp2.T)
    percent = ((np.where(tmp > 0)[0].shape[0]) * 100) / (oldtest.shape[0])
    return percent
# Before we start, something about cheating.
# The models won't be tested on datapoints in the future, so if we could
# get our hands on some data that is more recent than the last point in our
# dataset, we could cheat by including it in our training set covertly.
# This would result in overfitting our model to extremes, but since we
# would test with the same data we train on, the testing process would miss
# it and we would appear to have achieved excellent results in
# predicting the "future".
# Then, to cover it all up, we could blame the non-reproducibility of our
# training results on the random seed. For example, we could seed
# with the current system time in milliseconds, which differs greatly
# each time the program starts.
# We refuse to do this and seed our random generators with pre-determined
# numbers, which should guarantee total reproducibility of our results,
# including training the model.
# Note: We train on CPU and we are not sure if the rounding "mechanics"
# works the same way on the CPU as on the GPU, so we don't know if training
# our net on a GPU would change the end results.
# Keras is supposed to use the numpy pseudo-random generator
# and tensorflow has its own.
# We seed both.
np.random.seed(1)
tf.set_random_seed(2)
data = pd.read_csv('/home/datacrunch/Downloads/matrix_one_file/price_data.csv')
# We start with the Bitcoin model.
# The preliminary analysis indicates that:
# 1. The price of Bitcoin is heavily correlated with the prices of all major
# cryptocurrencies. We are making the educated guess that the Bitcoin price
# influences the prices of the other cryptocurrencies rather than the other
# way around.
# This means that the lesser-known cryptocurrencies would just add noise to
# our model. Since we will not trade them, we simply do not use the provided
# data about their prices, volumes, etc.
#
# 2. In the short term, the price of Bitcoin is not influenced significantly
# by the daily trade volumes or the total number of emitted bitcoins,
# so we won't use this data in the model.
#
# Based on these observations, we conclude that the only semi-reliable
# predictor for the current prices, which is included in the dataset,
# is the data on the historical prices of each cryptocurrency and especially
# the data on Bitcoin prices.
#
# We will approach the problem the "Bollinger way", choosing to look 20 or
# so steps back and feeding them into an LSTM neural network. The number is
# chosen based on conventional wisdom about how financial markets operate.
# We actually tried different numbers of periods to look back. Our results
# indicated that there is no significant difference in the performance
# of the neural network if we look as far as 40 periods back.
# When trying shorter periods (as short as 3), we saw a small increase in
# success at predicting the direction of the next move of the price, at the
# cost of a worse prediction of the scale of the movement itself.
# Which property of the model - the ability to predict the direction or the
# scale of the movement - is more important depends on the particular
# trading strategy chosen. The assumptions of the competition - no short
# selling, no futures trading, autonomous AI-supported trading decisions -
# imply that the direction is important enough to warrant shorter
# look-back periods.
# We split the data at row 13251 to get the train and test sets
splitpoint = 13251
bitcoinid = '1442' # Bitcoin id in the dataset
otherid = '1443' # Etherium id in the dataset
bitcoin = data[bitcoinid]
other = data[otherid]
# And we need the time column for neat data visualisations
# Too bad that in the dataset, the time column sometimes comes with
# milliseconds in different formats, so the column must be cleaned,
# if only to look nice on chart ticks
time = data['time'].str[0:16]
# We look *lookback* periods back in time
lookback = 20
# First we fill the missing data
bitcoin = bitcoin.fillna(method='pad')
other = other.fillna(method='pad')
# Then we scale and center the data
scalefactor_bitcoin = bitcoin.max()
bitcoin = bitcoin / scalefactor_bitcoin
bitcoin = bitcoin - 0.5
scalefactor_other = other.max()
other = other / scalefactor_other
other = other - 0.5
# And we combine both vectors into a numpy array
combined = np.array([bitcoin, other])
combined = combined.T
# Make the timeframes. Each timeframe contains price data about
# lookback periods back in time.
# We shape the timeframes so they can be fed into the LSTM layer of Keras
timeframes, drop = np.split((combined), [lookback], axis=0)
timeframes = timeframes.reshape(1, lookback, 2)
# We do not generate the last timeframe since we have no Y for it
for c in range(1, bitcoin.count()-lookback):
    drop1, newframe, drop2 = np.split((combined), [c,c+lookback], axis=0)
    newframe = newframe.reshape(1, lookback, 2)
    timeframes = np.concatenate((timeframes, newframe), axis=0)
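# As in the Bitcoin script, growing the array inside the loop is quadratic;
# an equivalent construction (our sketch, producing the same
# (n - lookback, lookback, 2) output) would be:
#timeframes = np.array([combined[c:c+lookback]
#                       for c in range(combined.shape[0] - lookback)])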
# Split the dataset into traindata and testdata
(trainX, testX) = np.split(timeframes, [splitpoint])
(trainY, testY) = np.split(np.array(other[lookback:]), [splitpoint])
# (No extra reshaping is needed here - the timeframes above are already
# three-dimensional, unlike in the Bitcoin script.)
#trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
#testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
# Init the model
model = Sequential()
# LSTM layer
model.add(LSTM(32, input_shape=(lookback,2), return_sequences = False))
# More LSTM layers are added in this fashion:
#model.add(LSTM(32, return_sequences = True))
# Please note that LSTM layers return data with one dimension more than
# the other types of layers. So when we stack such layers, all layers
# except the last one must be called with return_sequences = True
#
# Output layer
model.add(Dense(1))
# Compile the model
# ADAM optimizer seems to work better than SGD with all
# tested scenarios about lookback and depth of our network.
model.compile(loss='mean_squared_error', optimizer='adam')
# If we need (during development) to load the model and train it a little
# more to determine the number of epochs needed, we uncomment this:
#model = load_model('etherium.h5')
# This is the time-consuming step - train the model
model.fit(trainX, trainY, epochs=60, verbose=2)
# We save the model at development time to avoid having to train it
# repeatedly while hammering the code that follows.
model.save('etherium.h5')
# We comment out the previous lines and load the trained model
#model = load_model('etherium.h5')
predictY = model.predict(testX, verbose = 1)
# "De"-scale, "De-center"
predictY = (predictY + 0.5) * scalefactor_other
testY = (testY + 0.5) * scalefactor_other
# Some data visualisations
# Compute MAPE / DS
mape = compute_mape(testY, predictY)
print("MAPE is %f percent"%mape)
ds = compute_ds(testY, predictY)
print("DS is %s percent"%ds)
# predictY and testY on all validation points
# With this many datapoints (~2000), the scale will hide the differences,
# so things will look much better than they actually are.
plt.rc('xtick', labelsize=8)
plt.rc('axes', labelsize=14)
plt.rc('figure', titlesize=14)
plt.plot(testY[:-30], label='True price')
plt.plot(predictY[:-30], label='Predicted price\n/1 period in the future/')
plt.title('Predicted and real values of Etherium prices on the test set\n ~2000 points from 12.03.2018 until 23.03.2018\nTeam: The Diggers /Tsenov Academy of Economics/\n \nMAPE: %.2f%% DS: %.2f%%'%(mape,ds))
plt.legend()
plt.xlabel('Time')
plt.ylabel('ETH/USD')
plt.tight_layout()
# Save to disk, so we can include this in the team article or something.
plt.savefig('etherium_predictvstest_2000.png', dpi = 1200)
plt.show()
# So plot last 100 datapoints of testY and PredictY instead
plt.rc('xtick', labelsize=8)
plt.rc('axes', labelsize=14)
plt.rc('figure', titlesize=14)
plt.plot(testY[-130:-30], label='True price')
plt.plot(predictY[-130:-30], label='Predicted price\n/1 period in the future/')
plt.title('Predicted and real values of Etherium prices on the test set\n 100 points from 2018-03-21 12:15:00 until 2018-03-23 11:15:00\nTeam: The Diggers /Tsenov Academy of Economics/\n \nMAPE: %.2f%% DS: %.2f%%'%(mape,ds))
plt.legend()
plt.xlabel('Time')
plt.ylabel('ETH/USD')
plt.tight_layout()
plt.savefig('etherium_predictvstest_100.png', dpi = 1200)
plt.show()
# Last 10 datapoints of TestY and PredictY
plt.rc('xtick', labelsize=8)
plt.rc('axes', labelsize=14)
plt.rc('figure', titlesize=14)
plt.plot(testY[-40:-30], label='True price')
plt.plot(predictY[-40:-30], label='Predicted price\n/1 period in the future/')
plt.title('Predicted and real values of Etherium prices on 10 points in the test set\nTeam: The Diggers /Tsenov Academy of Economics/\n \nMAPE: %.2f%% DS: %.2f%%'%(mape,ds))
plt.grid()
plt.legend()
plt.xlabel('Time')
plt.ylabel('ETH/USD')
plt.xticks(range(10), time[-40:-30], rotation='vertical')
plt.tight_layout()
plt.savefig('etherium_predictvstest_10.png', dpi = 1200)
plt.show()
p = pd.DataFrame(data=np.vstack((testY, predictY[:,0])).T)
p.columns = ['testY', 'predictY']
p.to_csv('mape_ds_etherium.csv')
###################################################
#       Experiment with cryptocurrency data       #
#                                                 #
#              The Diggers Team                   #
#    at D. A. Tsenov Academy of Economics         #
#                                                 #
###################################################

library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
library(ggplot2)
library(forecast)
library(tseries)
library(foreign)
library(anytime)

data <- read.spss("http://data.eacademybg.com/price_data_20only1.sav", to.data.frame=TRUE)
## Warning in read.spss("http://data.eacademybg.com/price_data_20only1.sav", :
## C:\Users\krst\AppData\Local\Temp\RtmpQNGv5o\file4d8416b93d49: Unrecognized
## record type 7, subtype 25 encountered in system file
data$time <- as.Date(anytime(data$time))


# 0. Read all price data for all 20 cryptos
dim(data)
## [1] 15266    21
data[,2:21] <- sapply(data[,2:21], as.numeric)

# 1. Bitcoin BTC forecasting
dataBTC.ts <- ts(data$Bitcoin)
head(dataBTC.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 10756.0 10788.1 10807.5 10776.1 10729.7 10653.3
# 1.1. Training set BTC
xt <- window(dataBTC.ts, end=c(13251))
plot(xt, type="l", main="Bitcoin prices b/w 1/17/2018 and 3/12/2018", ylab="BTC Price",xlab="Time", bty="l")

# 1.2. Evaluation set
xf <- window(dataBTC.ts, start=c(13252))
plot(xf, type="l", main="Bitcoin prices b/w 3/12/2018 and 3/23/2018", ylab="BTC Price",xlab="Time", bty="l")

# 1.3. Choosing smoothing models with ets()
sbestBTC <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestBTC
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.4883 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 9663.7677 
##     b = 20.514 
## 
##   sigma:  28.1083
## 
##      AIC     AICc      BIC 
## 11267.63 11267.73 11296.03
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestBTC,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set -3.361973e-02   28.02448   19.90556 -1.531224e-04  0.2157092
## Test set     -1.163308e+03 1288.01006 1172.06023 -1.416167e+01 14.2504197
##                    MASE       ACF1 Theil's U
## Training set  0.9704516 0.07120127        NA
## Test set     57.1412115 0.99903345  51.71741
plot(forecast(sbestBTC))

BTC1 <- forecast(sbestBTC, h=288)
# 1.4 Choosing ARIMA models...
abestBTC <- auto.arima(xt)
abestBTC
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1     ar2
##       0.3416  0.0501
## s.e.  0.0087  0.0087
## 
## sigma^2 estimated as 762.4:  log likelihood=-62459
## AIC=124924   AICc=124924   BIC=124946.5
summary(abestBTC)
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1     ar2
##       0.3416  0.0501
## s.e.  0.0087  0.0087
## 
## sigma^2 estimated as 762.4:  log likelihood=-62459
## AIC=124924   AICc=124924   BIC=124946.5
## 
## Training set error measures:
##                       ME     RMSE      MAE           MPE      MAPE
## Training set -0.05307925 27.69587 17.69514 -0.0006550227 0.1775618
##                   MASE        ACF1
## Training set 0.9197179 0.005301944
accuracy(forecast(abestBTC,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set -5.307925e-02   27.69587   17.69514 -6.550227e-04  0.1775618
## Test set     -1.152612e+03 1274.51224 1161.42930 -1.401642e+01 14.1058351
##                    MASE        ACF1 Theil's U
## Training set  0.9197179 0.005301944        NA
## Test set     60.3661348 0.999579131   50.2545
plot(forecast(abestBTC, h=288))

BTC2 <- forecast(abestBTC, h=288)
# 1.5 Write results as csv
write.csv(BTC2, file = "BTC_predict.csv", row.names = FALSE)
# 2. Ethereum ETH forecasting
dataETH.ts <- ts(data$Ethereum)
head(dataETH.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 960.93 961.11 961.68 954.97 953.37 946.03
# 2.1. Training set
xt <- window(dataETH.ts, end=c(13251))
plot(xt, type="l", main="Ethereum prices b/w 1/17/2018 and 3/12/2018", ylab="ETH Price",xlab="Time", bty="l")

# 2.2. Evaluation set
xf <- window(dataETH.ts, start=c(13252))
plot(xf, type="l", main="Ethereum prices b/w 3/12/2018 and 3/23/2018", ylab="ETH Price",xlab="Time", bty="l")

# 2.3. Choosing smoothing models with ets()
sbestETH <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestETH
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.3994 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 737.8782 
##     b = 1.0635 
## 
##   sigma:  2.0781
## 
##      AIC     AICc      BIC 
## 6891.851 6891.952 6920.251
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestETH,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set -5.005183e-03   2.071863   1.492355 -5.745436e-04  0.2108739
## Test set     -1.267788e+02 143.089516 127.034676 -2.243523e+01 22.4698857
##                   MASE       ACF1 Theil's U
## Training set  0.963232 0.08242392        NA
## Test set     81.993794 1.00000000  66.79193
plot(forecast(sbestETH))

ETH1 <- forecast(sbestETH, h=288)
# 2.4 Choosing ARIMA models...
abestETH <- auto.arima(xt)
abestETH
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1      ar2
##       0.3739  -0.0095
## s.e.  0.0087   0.0087
## 
## sigma^2 estimated as 8.065:  log likelihood=-32512.52
## AIC=65031.05   AICc=65031.05   BIC=65053.52
summary(abestETH)
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1      ar2
##       0.3739  -0.0095
## s.e.  0.0087   0.0087
## 
## sigma^2 estimated as 8.065:  log likelihood=-32512.52
## AIC=65031.05   AICc=65031.05   BIC=65053.52
## 
## Training set error measures:
##                       ME     RMSE      MAE          MPE      MAPE
## Training set -0.01082069 2.848426 1.706706 -0.001518769 0.1884465
##                   MASE        ACF1
## Training set 0.9146436 0.002693457
accuracy(forecast(abestETH,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set   -0.01082069   2.848426   1.706706  -0.001518769  0.1884465
## Test set     -126.24019140 142.802434 126.627106 -22.463360612 22.5158008
##                    MASE        ACF1 Theil's U
## Training set  0.9146436 0.002693457        NA
## Test set     67.8609471 0.999952791  66.08761
plot(forecast(abestETH))

ETH2 <- forecast(abestETH, h=288)
# 2.5 Write results as csv
write.csv(ETH2, file = "ETH_predict.csv", row.names = FALSE)

# 3. Ripple XRP forecasting
dataXRP.ts <- ts(data$Ripple)
head(dataXRP.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 1.13 1.13 1.14 1.12 1.11 1.09
# 3.1. Training set XRP
xt <- window(dataXRP.ts, end=c(13251))
plot(xt, type="l", main="Ripple prices b/w 1/17/2018 and 3/12/2018", ylab="XRP Price",xlab="Time", bty="l")

# 3.2. Evaluation set
xf <- window(dataXRP.ts, start=c(13252))
plot(xf, type="l", main="Ripple prices b/w 3/12/2018 and 3/23/2018", ylab="XRP Price",xlab="Time", bty="l")

# 3.3. Choosing smoothing models with ets()
sbestXRP <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestXRP
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.3994 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 737.8782 
##     b = 1.0635 
## 
##   sigma:  2.0781
## 
##      AIC     AICc      BIC 
## 6891.851 6891.952 6920.251
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestXRP,h=2015), xf)
##                         ME       RMSE        MAE           MPE
## Training set -5.005183e-03   2.071863   1.492355 -5.745436e-04
## Test set     -7.305065e+02 730.506487 730.506484 -1.044732e+05
##                      MAPE       MASE       ACF1 Theil's U
## Training set 2.108739e-01   0.963232 0.08242392        NA
## Test set     1.044732e+05 471.501170 1.00000000  223638.4
plot(forecast(sbestXRP))

XRP1 <- forecast(sbestXRP, h=288)
# 3.4 Choosing ARIMA models...
abestXRP <- auto.arima(xt)
abestXRP
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1      ar2
##       0.3739  -0.0095
## s.e.  0.0087   0.0087
## 
## sigma^2 estimated as 8.065:  log likelihood=-32512.52
## AIC=65031.05   AICc=65031.05   BIC=65053.52
summary(abestXRP)
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1      ar2
##       0.3739  -0.0095
## s.e.  0.0087   0.0087
## 
## sigma^2 estimated as 8.065:  log likelihood=-32512.52
## AIC=65031.05   AICc=65031.05   BIC=65053.52
## 
## Training set error measures:
##                       ME     RMSE      MAE          MPE      MAPE
## Training set -0.01082069 2.848426 1.706706 -0.001518769 0.1884465
##                   MASE        ACF1
## Training set 0.9146436 0.002693457
accuracy(forecast(abestXRP,h=2015), xf)
##                         ME       RMSE        MAE           MPE
## Training set   -0.01082069   2.848426   1.706706 -1.518769e-03
## Test set     -727.23685171 727.236854 727.236852 -1.043252e+05
##                      MAPE        MASE        ACF1 Theil's U
## Training set 1.884465e-01   0.9146436 0.002693457        NA
## Test set     1.043252e+05 389.7347339 1.000000000  221991.9
plot(forecast(abestXRP))

XRP2 <- forecast(abestXRP, h=288)
# 3.5 Write results as csv
write.csv(XRP2, file = "XRP_predict.csv", row.names = FALSE)

# 4. Bitcoin_Cash BCH forecasting
dataBCH.ts <- ts(data$Bitcoin_Cash)
head(dataBCH.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 1744.13 1742.58 1751.49 1742.52 1730.45 1708.89
# 4.1. Training set 
xt <- window(dataBCH.ts, end=c(13251))
plot(xt, type="l", main="Bitcoin_Cash prices b/w 1/17/2018 and 3/12/2018", ylab="BCH Price",xlab="Time", bty="l")

# 4.2. Evaluation set
xf <- window(dataBCH.ts, start=c(13252))
plot(xf, type="l", main="Bitcoin_Cash prices b/w 3/12/2018 and 3/23/2018", ylab="BCH Price",xlab="Time", bty="l")

# 4.3. Choosing smoothing models with ets()
sbestBCH <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestBCH
## ETS(M,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.1032 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 1038.5326 
##     b = 3.5408 
## 
##   sigma:  0.0049
## 
##    AIC   AICc    BIC 
## 8404.6 8404.7 8433.0
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestBCH,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set    0.07501579   5.109858   3.540021   0.006121933  0.3405144
## Test set     -161.43401445 173.120852 161.434014 -16.937897982 16.9378980
##                  MASE       ACF1 Theil's U
## Training set  1.00180 0.08292393        NA
## Test set     45.68466 0.99670750  43.41923
plot(forecast(sbestBCH))

BCH1 <- forecast(sbestBCH, h=288)
# 4.4 Choosing ARIMA models...
abestBCH <- auto.arima(xt)
abestBCH
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1863
## s.e.  0.0086
## 
## sigma^2 estimated as 34.44:  log likelihood=-42060.76
## AIC=84125.52   AICc=84125.52   BIC=84140.5
summary(abestBCH)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1863
## s.e.  0.0086
## 
## sigma^2 estimated as 34.44:  log likelihood=-42060.76
## AIC=84125.52   AICc=84125.52   BIC=84140.5
## 
## Training set error measures:
##                      ME     RMSE      MAE        MPE      MAPE      MASE
## Training set -0.0382398 5.886246 3.685255 -0.0034361 0.2731826 0.9883935
##                     ACF1
## Training set 0.004158196
accuracy(forecast(abestBCH,h=2015), xf)
##                        ME       RMSE        MAE         MPE       MAPE
## Training set   -0.0382398   5.886246   3.685255  -0.0034361  0.2731826
## Test set     -136.3106184 149.569833 136.713383 -14.3453788 14.3809778
##                    MASE        ACF1 Theil's U
## Training set  0.9883935 0.004158196        NA
## Test set     36.6668344 0.997334073  37.16473
plot(forecast(abestBCH))

BCH2 <- forecast(abestBCH, h=288)
# 4.5 Write results as csv
write.csv(BCH2, file = "BCH_predict.csv", row.names = FALSE)

# 5. Cardano ADA forecasting
dataADA.ts <- ts(data$Cardano)
head(dataADA.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 0.544258 0.545643 0.544555 0.539331 0.538712 0.532169
# 5.1. Training set 
xt <- window(dataADA.ts, end=c(13251))
plot(xt, type="l", main="Cardano prices b/w 1/17/2018 and 3/12/2018", ylab="Cardano Price",xlab="Time", bty="l")

# 5.2. Evaluation set
xf <- window(dataADA.ts, start=c(13252))
plot(xf, type="l", main="Cardano prices b/w 3/12/2018 and 3/23/2018", ylab="ADA Price",xlab="Time", bty="l")

# 5.3. Choosing smoothing models with ets()
sbestADA <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestADA
## ETS(A,N,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
## 
##   Initial states:
##     l = 0.2305 
## 
##   sigma:  0.0013
## 
##       AIC      AICc       BIC 
## -5512.844 -5512.816 -5498.644
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestADA,h=2015), xf)
##                         ME        RMSE          MAE           MPE
## Training set -6.035893e-06 0.001291789 0.0009266962  -0.004441112
## Test set     -3.740653e-02 0.046824683 0.0381583076 -22.762119208
##                    MAPE       MASE       ACF1 Theil's U
## Training set  0.4285634  0.9988131 0.02547363        NA
## Test set     23.0888980 41.1278459 0.99898952   43.0361
plot(forecast(sbestADA))

ADA1 <- forecast(sbestADA, h=288)
# 5.4 Choosing ARIMA models...
abestADA <- auto.arima(xt)
abestADA
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.0760
## s.e.  0.0087
## 
## sigma^2 estimated as 7.209e-06:  log likelihood=59193.5
## AIC=-118383   AICc=-118383   BIC=-118368
summary(abestADA)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.0760
## s.e.  0.0087
## 
## sigma^2 estimated as 7.209e-06:  log likelihood=59193.5
## AIC=-118383   AICc=-118383   BIC=-118368
## 
## Training set error measures:
##                         ME        RMSE         MAE          MPE     MAPE
## Training set -2.211298e-05 0.002693261 0.001554958 -0.007803234 0.365105
##                   MASE        ACF1
## Training set 0.9985293 0.002081086
accuracy(forecast(abestADA,h=2015), xf)
##                         ME        RMSE         MAE           MPE      MAPE
## Training set -2.211298e-05 0.002693261 0.001554958  -0.007803234  0.365105
## Test set     -3.649693e-02 0.045824004 0.037378481 -22.155201487 22.538815
##                    MASE        ACF1 Theil's U
## Training set  0.9985293 0.002081086        NA
## Test set     24.0029013 0.999512740  41.56226
plot(forecast(abestADA))

ADA2 <- forecast(abestADA, h=288)
# 5.5 Write results as csv
write.csv(ADA2, file = "ADA_predict.csv", row.names = FALSE)

# 6. Nem XEM forecasting
dataXEM.ts <- ts(data$Nem)
head(dataXEM.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 0.887129 0.892606 0.903337 0.894009 0.881417 0.873802
# 6.1. Training set 
xt <- window(dataXEM.ts, end=c(13251))
plot(xt, type="l", main="Nem prices b/w 1/17/2018 and 3/12/2018", ylab="Nem Price",xlab="Time", bty="l")

# 6.2. Evaluation set
xf <- window(dataXEM.ts, start=c(13252))
plot(xf, type="l", main="Nem prices b/w 3/12/2018 and 3/23/2018", ylab="Nem Price",xlab="Time", bty="l")

# 6.3. Choosing smoothing models with ets()
sbestXEM <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestXEM
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.0818 
##     phi   = 0.892 
## 
##   Initial states:
##     l = 0.2852 
##     b = 0.0019 
## 
##   sigma:  0.002
## 
##       AIC      AICc       BIC 
## -4754.644 -4754.543 -4726.243
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestXEM,h=2015), xf)
##                         ME        RMSE        MAE         MPE       MAPE
## Training set  3.633165e-05 0.002021351 0.00133422  0.01048268  0.3889143
## Test set     -4.813932e-03 0.069598087 0.05765626 -5.43179214 17.3793833
##                    MASE      ACF1 Theil's U
## Training set  0.9996521 0.1811484        NA
## Test set     43.1984288 1.0000000  27.35768
plot(forecast(sbestXEM))

XEM1 <- forecast.ets(sbestXEM, h=288)
# 6.4 Choosing ARIMA models...
abestXEM <- auto.arima(xt)
abestXEM
## Series: xt 
## ARIMA(0,1,0) 
## 
## sigma^2 estimated as 2.507e-05:  log likelihood=50992.13
## AIC=-101982.2   AICc=-101982.2   BIC=-101974.8
summary(abestXEM)
## Series: xt 
## ARIMA(0,1,0) 
## 
## sigma^2 estimated as 2.507e-05:  log likelihood=50992.13
## AIC=-101982.2   AICc=-101982.2   BIC=-101974.8
## 
## Training set error measures:
##                         ME        RMSE         MAE          MPE      MAPE
## Training set -3.912949e-05 0.005022228 0.002454382 -0.009363835 0.3743484
##                   MASE        ACF1
## Training set 0.9975699 -0.03815415
accuracy(forecast(abestXEM,h=2015), xf)
##                         ME        RMSE         MAE          MPE       MAPE
## Training set -3.912949e-05 0.005022228 0.002454382 -0.009363835  0.3743484
## Test set     -7.818375e-03 0.069700244 0.058200095 -6.330636004 17.7104667
##                    MASE        ACF1 Theil's U
## Training set  0.9975699 -0.03815415        NA
## Test set     23.6550992  1.00000000  27.74725
plot(forecast(abestXEM))

XEM2 <- forecast(abestXEM, h=288)
# 6.5 Write results as csv
write.csv(XEM2, file = "XEM_predict.csv", row.names = FALSE)

# 7. Litecoin LTC forecasting
dataLTC.ts <- ts(data$Litecoin)
head(dataLTC.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 176.14 176.17 176.36 175.37 174.30 173.10
# 7.1. Training set 
xt <- window(dataLTC.ts, end=c(13251))
plot(xt, type="l", main="Litecoin prices b/w 1/17/2018 and 3/12/2018", ylab="Litecoin Price",xlab="Time", bty="l")

# 7.2. Evaluation set
xf <- window(dataLTC.ts, start=c(13252))
plot(xf, type="l", main="Litecoin prices b/w 3/12/2018 and 3/23/2018", ylab="Litecoin Price",xlab="Time", bty="l")

# 7.3. Choosing smoothing models with ets()
sbestLTC <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestLTC
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.1734 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 179.7913 
##     b = 0.3012 
## 
##   sigma:  0.7384
## 
##      AIC     AICc      BIC 
## 5153.568 5153.668 5181.968
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestLTC,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set   0.006563092  0.7362069  0.5175595   0.003222671  0.2861927
## Test set     -26.595030395 28.7655536 26.6371953 -16.763119127 16.7850381
##                   MASE      ACF1 Theil's U
## Training set  1.001805 0.1245278        NA
## Test set     51.559827 0.9985560  48.50668
plot(forecast(sbestLTC))

LTC1 <- forecast.ets(sbestLTC, h=288)
# 7.4 Choosing ARIMA models...
abestLTC <- auto.arima(xt)
abestLTC
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1      ar2
##       0.2497  -0.0322
## s.e.  0.0087   0.0087
## 
## sigma^2 estimated as 0.6261:  log likelihood=-15681.44
## AIC=31368.88   AICc=31368.88   BIC=31391.35
summary(abestLTC)
## Series: xt 
## ARIMA(2,1,0) 
## 
## Coefficients:
##          ar1      ar2
##       0.2497  -0.0322
## s.e.  0.0087   0.0087
## 
## sigma^2 estimated as 0.6261:  log likelihood=-15681.44
## AIC=31368.88   AICc=31368.88   BIC=31391.35
## 
## Training set error measures:
##                        ME      RMSE       MAE           MPE      MAPE
## Training set 0.0006572622 0.7936805 0.4793688 -0.0003400807 0.2690653
##                   MASE        ACF1
## Training set 0.9596103 0.003382146
accuracy(forecast(abestLTC,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set  6.572622e-04  0.7936805  0.4793688 -3.400807e-04  0.2690653
## Test set     -2.410180e+01 26.4095070 24.2486256 -1.522674e+01 15.3034052
##                    MASE        ACF1 Theil's U
## Training set  0.9596103 0.003382146        NA
## Test set     48.5414000 0.998802241  43.76989
plot(forecast(abestLTC))

LTC2 <- forecast(abestLTC, h=288)
# 7.5 Write results as csv
write.csv(LTC2, file = "LTC_predict.csv", row.names = FALSE)

# 8. Neo NEO forecasting
dataNEO.ts <- ts(data$Neo)
head(dataNEO.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 119.86 119.69 120.18 119.89 119.12 117.25
# 8.1. Training set 
xt <- window(dataNEO.ts, end=c(13251))
plot(xt, type="l", main="Neo prices b/w 1/17/2018 and 3/12/2018", ylab="Neo Price",xlab="Time", bty="l")

# 8.2. Evaluation set
xf <- window(dataNEO.ts, start=c(13252))
plot(xf, type="l", main="NEO prices b/w 3/12/2018 and 3/23/2018", ylab="NEO Price",xlab="Time", bty="l")

# 8.3. Choosing smoothing models with ets()
sbestNEO <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestNEO
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.1195 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 95.4667 
##     b = 0.4044 
## 
##   sigma:  0.4464
## 
##      AIC     AICc      BIC 
## 4308.221 4308.322 4336.622
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestNEO,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set  -0.003615516  0.4451133  0.3106178  -0.004499772  0.3449756
## Test set     -20.898370760 22.8879836 20.8992740 -31.343671352 31.3446435
##                  MASE      ACF1 Theil's U
## Training set  1.00400 0.1104707        NA
## Test set     67.55207 1.0000000   58.1181
plot(forecast(sbestNEO))

NEO1 <- forecast.ets(sbestNEO, h=288)
# 8.4 Choosing ARIMA models...
abestNEO <- auto.arima(xt)
abestNEO
## Series: xt 
## ARIMA(3,1,0) 
## 
## Coefficients:
##          ar1      ar2     ar3
##       0.2187  -0.0307  0.0306
## s.e.  0.0087   0.0089  0.0087
## 
## sigma^2 estimated as 0.4047:  log likelihood=-12806.84
## AIC=25621.69   AICc=25621.69   BIC=25651.66
summary(abestNEO)
## Series: xt 
## ARIMA(3,1,0) 
## 
## Coefficients:
##          ar1      ar2     ar3
##       0.2187  -0.0307  0.0306
## s.e.  0.0087   0.0089  0.0087
## 
## sigma^2 estimated as 0.4047:  log likelihood=-12806.84
## AIC=25621.69   AICc=25621.69   BIC=25651.66
## 
## Training set error measures:
##                        ME      RMSE      MAE          MPE      MAPE
## Training set -0.001533476 0.6380854 0.401721 -0.002550738 0.3339069
##                   MASE        ACF1
## Training set 0.9745479 0.002398953
accuracy(forecast(abestNEO,h=2015), xf)
##                         ME       RMSE       MAE           MPE       MAPE
## Training set  -0.001533476  0.6380854  0.401721  -0.002550738  0.3339069
## Test set     -19.908190064 21.9412106 19.933380 -29.954273907 29.9815461
##                    MASE        ACF1 Theil's U
## Training set  0.9745479 0.002398953        NA
## Test set     48.3570342 1.000000000  54.58116
plot(forecast(abestNEO))

NEO2 <- forecast(abestNEO, h=288)
# 8.5 Write results as csv
write.csv(NEO2, file = "NEO_predict.csv", row.names = FALSE)

# 9. Stellar XLM forecasting
dataXLM.ts <- ts(data$Stellar)
head(dataXLM.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 0.411229 0.412253 0.412866 0.409000 0.407859 0.405104
# 9.1. Training set 
xt <- window(dataXLM.ts, end=c(13251))
plot(xt, type="l", main="Stellar prices b/w 1/17/2018 and 3/12/2018", ylab="Stellar Price",xlab="Time", bty="l")

# 9.2. Evaluation set
xf <- window(dataXLM.ts, start=c(13252))
plot(xf, type="l", main="Stellar prices b/w 3/12/2018 and 3/23/2018", ylab="Stellar Price",xlab="Time", bty="l")

# 9.3. Choosing smoothing models with ets()
sbestXLM <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestXLM
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.0597 
##     phi   = 0.9187 
## 
##   Initial states:
##     l = 0.3117 
##     b = 5e-04 
## 
##   sigma:  0.0015
## 
##       AIC      AICc       BIC 
## -5274.584 -5274.483 -5246.184
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestXLM,h=2015), xf)
##                         ME        RMSE         MAE           MPE
## Training set -9.364312e-06 0.001483313 0.001041399  -0.003507029
## Test set     -6.036161e-02 0.069415264 0.060504235 -27.276662423
##                    MAPE      MASE       ACF1 Theil's U
## Training set  0.3444743  1.003102 0.08362285        NA
## Test set     27.3228344 58.279206 1.00000000  52.35883
plot(forecast(sbestXLM))

XLM1 <- forecast.ets(sbestXLM, h=288)
# 9.4 Choosing ARIMA models...
abestXLM <- auto.arima(xt)
abestXLM
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1420
## s.e.  0.0086
## 
## sigma^2 estimated as 6.618e-06:  log likelihood=59754.23
## AIC=-119504.5   AICc=-119504.5   BIC=-119489.5
summary(abestXLM)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1420
## s.e.  0.0086
## 
## sigma^2 estimated as 6.618e-06:  log likelihood=59754.23
## AIC=-119504.5   AICc=-119504.5   BIC=-119489.5
## 
## Training set error measures:
##                         ME        RMSE         MAE          MPE      MAPE
## Training set -6.426296e-06 0.002580388 0.001537121 -0.003420091 0.3622905
##                   MASE        ACF1
## Training set 0.9904068 0.002032747
accuracy(forecast(abestXLM,h=2015), xf)
##                         ME        RMSE         MAE           MPE
## Training set -6.426296e-06 0.002580388 0.001537121  -0.003420091
## Test set     -5.591872e-02 0.065327129 0.056277902 -25.385374521
##                    MAPE       MASE        ACF1 Theil's U
## Training set  0.3622905  0.9904068 0.002032747        NA
## Test set     25.5019669 36.2613112 1.000000000  49.00143
plot(forecast(abestXLM))

XLM2 <- forecast(abestXLM, h=288)
# 9.5 Write results as csv
write.csv(XLM2, file = "XLM_predict.csv", row.names = FALSE)

# 10. Iota MIOTA forecasting
dataMIOTA.ts <- ts(data$Iota)
head(dataMIOTA.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 2.48 2.50 2.52 2.51 2.50 2.46
# 10.1. Training set 
xt <- window(dataMIOTA.ts, end=c(13251))
plot(xt, type="l", main="Iota prices b/w 1/17/2018 and 3/12/2018", ylab="Iota Price",xlab="Time", bty="l")

# 10.2. Evaluation set
xf <- window(dataMIOTA.ts, start=c(13252))
plot(xf, type="l", main="Iota prices b/w 3/12/2018 and 3/23/2018", ylab="Iota Price",xlab="Time", bty="l")

# 10.3. Choosing smoothing models with ets()
sbestMIOTA <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestMIOTA
## ETS(A,N,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9595 
## 
##   Initial states:
##     l = 1.4496 
## 
##   sigma:  0.0087
## 
##       AIC      AICc       BIC 
## -2313.751 -2313.722 -2299.551
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestMIOTA,h=2015), xf)
##                         ME        RMSE         MAE           MPE
## Training set -6.150372e-05 0.008673307 0.005290325  -0.006451885
## Test set     -1.915807e-01 0.219642246 0.193136349 -16.758119104
##                    MAPE      MASE         ACF1 Theil's U
## Training set  0.3874503  1.015694 -0.001277552        NA
## Test set     16.8673463 37.080411  0.991410797  28.95636
plot(forecast(sbestMIOTA))

MIOTA1 <- forecast.ets(sbestMIOTA, h=288)
# 10.4 Choosing ARIMA models...
abestMIOTA <- auto.arima(xt)
abestMIOTA
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1060
## s.e.  0.0087
## 
## sigma^2 estimated as 0.000129:  log likelihood=40200.94
## AIC=-80397.88   AICc=-80397.88   BIC=-80382.9
summary(abestMIOTA)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1060
## s.e.  0.0087
## 
## sigma^2 estimated as 0.000129:  log likelihood=40200.94
## AIC=-80397.88   AICc=-80397.88   BIC=-80382.9
## 
## Training set error measures:
##                         ME       RMSE         MAE          MPE      MAPE
## Training set -7.601835e-05 0.01139401 0.006785777 -0.005543553 0.3423164
##                  MASE       ACF1
## Training set 1.032925 0.00550087
accuracy(forecast(abestMIOTA,h=2015), xf)
##                         ME       RMSE         MAE           MPE       MAPE
## Training set -7.601835e-05 0.01139401 0.006785777  -0.005543553  0.3423164
## Test set     -1.587787e-01 0.19127446 0.163449651 -13.979977698 14.3097872
##                   MASE       ACF1 Theil's U
## Training set  1.032925 0.00550087        NA
## Test set     24.880173 0.99247960  25.39918
plot(forecast(abestMIOTA))

MIOTA2 <- forecast(abestMIOTA, h=288)
# 10.5 Write results as csv
write.csv(MIOTA2, file = "MIOTA_predict.csv", row.names = FALSE)


# 11. Eos EOS forecasting
dataEOS.ts <- ts(data$Eos)
head(dataEOS.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 9.54 9.55 9.61 9.51 9.48 9.40
# 11.1. Training set 
xt <- window(dataEOS.ts, end=c(13251))
plot(xt, type="l", main="EOS prices b/w 1/17/2018 and 3/12/2018", ylab="EOS Price",xlab="Time", bty="l")

# 11.2. Evaluation set
xf <- window(dataEOS.ts, start=c(13252))
plot(xf, type="l", main="EOS prices b/w 3/12/2018 and 3/23/2018", ylab="EOS Price",xlab="Time", bty="l")

# 11.3. Choosing smoothing models with ets()
sbestEOS <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestEOS
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.1645 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 6.246 
##     b = 0.0314 
## 
##   sigma:  0.0278
## 
##       AIC      AICc       BIC 
## -357.6271 -357.5263 -329.2267
plot(ets(xt))
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series

accuracy(forecast(sbestEOS,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set -0.0001050774 0.02768908 0.01959423  -0.002123715  0.3244425
## Test set     -0.9391211541 1.09991712 0.94122303 -19.228030539 19.2594377
##                   MASE      ACF1 Theil's U
## Training set  1.014161 0.1433945        NA
## Test set     48.715985 0.9926645   40.2643
plot(forecast(sbestEOS))

EOS1 <- forecast.ets(sbestEOS, h=288)
# 11.4 Choosing ARIMA models...
abestEOS <- auto.arima(xt)
abestEOS
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.2613
## s.e.  0.0084
## 
## sigma^2 estimated as 0.002264:  log likelihood=21332.91
## AIC=-42661.82   AICc=-42661.82   BIC=-42646.84
summary(abestEOS)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.2613
## s.e.  0.0084
## 
## sigma^2 estimated as 0.002264:  log likelihood=21332.91
## AIC=-42661.82   AICc=-42661.82   BIC=-42646.84
## 
## Training set error measures:
##                         ME       RMSE        MAE          MPE      MAPE
## Training set -0.0001835622 0.04772785 0.02853255 -0.003152649 0.2889306
##                   MASE       ACF1
## Training set 0.9723897 0.01522244
accuracy(forecast(abestEOS,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set -0.0001835622 0.04772785 0.02853255  -0.003152649  0.2889306
## Test set     -0.7871841698 1.00264297 0.83734418 -16.362657292 17.1159126
##                    MASE       ACF1 Theil's U
## Training set  0.9723897 0.01522244        NA
## Test set     28.5367036 0.99378592  36.59168
plot(forecast(abestEOS))

EOS2 <- forecast(abestEOS, h=288)
# 11.5 Write results as csv
write.csv(EOS2, file = "EOS_predict.csv", row.names = FALSE)


# 12. Dash DASH forecasting
dataDASH.ts <- ts(data$Dash)
head(dataDASH.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 761.64 758.21 759.99 755.19 754.11 746.01
# 12.1. Training set 
xt <- window(dataDASH.ts, end=c(13251))
plot(xt, type="l", main="DASH prices b/w 1/17/2018 and 3/12/2018", ylab="DASH Price",xlab="Time", bty="l")

# 12.2. Evaluation set
xf <- window(dataDASH.ts, start=c(13252))
plot(xf, type="l", main="DASH prices b/w 3/12/2018 and 3/23/2018", ylab="DASH Price",xlab="Time", bty="l")

# 12.3. Choosing smoothing models with ets()
sbestDASH <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestDASH
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.1704 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 497.2073 
##     b = 0.6536 
## 
##   sigma:  1.8969
## 
##      AIC     AICc      BIC 
## 6738.599 6738.700 6766.999
plot(sbestDASH)

accuracy(forecast(sbestDASH,h=2015), xf)
##                         ME       RMSE       MAE           MPE      MAPE
## Training set    0.02545719   1.891229   1.30835   0.004649176  0.265626
## Test set     -106.59147146 116.590560 107.02139 -26.292197783 26.370993
##                   MASE       ACF1 Theil's U
## Training set  1.004673 0.02073966        NA
## Test set     82.180989 1.00000000  71.42688
plot(forecast(sbestDASH))

DASH1 <- forecast(sbestDASH, h=288)
# 12.4. Choosing ARIMA models with auto.arima()
abestDASH <- auto.arima(xt)
abestDASH
## Series: xt 
## ARIMA(5,1,0) 
## 
## Coefficients:
##          ar1     ar2     ar3     ar4     ar5
##       0.1633  0.0388  0.0282  0.0205  0.0227
## s.e.  0.0087  0.0089  0.0089  0.0089  0.0088
## 
## sigma^2 estimated as 6.421:  log likelihood=-31000.05
## AIC=62012.11   AICc=62012.11   BIC=62057.06
summary(abestDASH)
## Series: xt 
## ARIMA(5,1,0) 
## 
## Coefficients:
##          ar1     ar2     ar3     ar4     ar5
##       0.1633  0.0388  0.0282  0.0205  0.0227
## s.e.  0.0087  0.0089  0.0089  0.0089  0.0088
## 
## sigma^2 estimated as 6.421:  log likelihood=-31000.05
## AIC=62012.11   AICc=62012.11   BIC=62057.06
## 
## Training set error measures:
##                       ME     RMSE      MAE          MPE      MAPE     MASE
## Training set -0.01263959 2.541313 1.601404 -0.002430862 0.2465853 0.992416
##                     ACF1
## Training set 0.001070804
accuracy(forecast(abestDASH,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set   -0.01263959   2.541313   1.601404  -0.002430862  0.2465853
## Test set     -105.01694876 114.909116 105.540470 -25.931369794 26.0273458
##                   MASE        ACF1 Theil's U
## Training set  0.992416 0.001070804        NA
## Test set     65.405150 1.000000000  69.39577
plot(forecast(abestDASH))

DASH2 <- forecast(abestDASH, h=288)
# 12.5. Write results as CSV
write.csv(DASH2, file = "DASH_predict.csv", row.names = FALSE)



# 13. Monero XMR forecasting
dataXMR.ts <- ts(data$Monero)
head(dataXMR.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 312.89 313.80 315.04 312.10 310.39 307.71
# 13.1. Training set 
xt <- window(dataXMR.ts, end=c(13251))
plot(xt, type="l", main="XMR prices b/w 1/17/2018 and 3/12/2018", ylab="XMR Price",xlab="Time", bty="l")

# 13.2. Evaluation set
xf <- window(dataXMR.ts, start=c(13252))
plot(xf, type="l", main="XMR prices b/w 3/12/2018 and 3/23/2018", ylab="XMR Price",xlab="Time", bty="l")

# 13.3. Choosing smoothing models with ets()
sbestXMR <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestXMR
## ETS(M,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.2124 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 300.358 
##     b = 1.6832 
## 
##   sigma:  0.0054
## 
##      AIC     AICc      BIC 
## 6358.614 6358.714 6387.014
plot(sbestXMR)

accuracy(forecast(sbestXMR,h=2015), xf)
##                        ME     RMSE     MAE           MPE       MAPE
## Training set  -0.01549738  1.50900  1.0368  -0.005644431  0.3732618
## Test set     -61.64705230 66.16378 61.7032 -29.292271099 29.3119341
##                    MASE      ACF1 Theil's U
## Training set  0.9991559 0.1173291        NA
## Test set     59.4628845 1.0000000  68.74062
plot(forecast(sbestXMR))

XMR1 <- forecast(sbestXMR, h=288)
# 13.4. Choosing ARIMA models with auto.arima()
abestXMR <- auto.arima(xt)
abestXMR
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1897
## s.e.  0.0086
## 
## sigma^2 estimated as 1.702:  log likelihood=-22261.27
## AIC=44526.55   AICc=44526.55   BIC=44541.53
summary(abestXMR)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1897
## s.e.  0.0086
## 
## sigma^2 estimated as 1.702:  log likelihood=-22261.27
## AIC=44526.55   AICc=44526.55   BIC=44541.53
## 
## Training set error measures:
##                        ME     RMSE       MAE         MPE      MAPE
## Training set -0.001988962 1.308505 0.8512376 -0.00149996 0.2998362
##                   MASE        ACF1
## Training set 0.9837588 0.002952295
accuracy(forecast(abestXMR,h=2015), xf)
##                         ME      RMSE        MAE          MPE       MAPE
## Training set  -0.001988962  1.308505  0.8512376  -0.00149996  0.2998362
## Test set     -62.291943479 66.688881 62.3441939 -29.63588303 29.6541809
##                    MASE        ACF1 Theil's U
## Training set  0.9837588 0.002952295        NA
## Test set     72.0499788 1.000000000  67.95651
plot(forecast(abestXMR))

XMR2 <- forecast(abestXMR, h=288)
# 13.5. Write results as CSV
write.csv(XMR2, file = "XMR_predict.csv", row.names = FALSE)


# 14. Tron TRX forecasting
dataTRX.ts <- ts(data$Tron)
head(dataTRX.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 0.052420 0.052255 0.052749 0.052214 0.052337 0.051901
# 14.1. Training set 
xt <- window(dataTRX.ts, end=c(13251))
plot(xt, type="l", main="TRX prices b/w 1/17/2018 and 3/12/2018", ylab="TRX Price",xlab="Time", bty="l")

# 14.2. Evaluation set
xf <- window(dataTRX.ts, start=c(13252))
plot(xf, type="l", main="TRX prices b/w 3/12/2018 and 3/23/2018", ylab="TRX Price",xlab="Time", bty="l")

# 14.3. Choosing smoothing models with ets()
sbestTRX <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestTRX
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9931 
##     beta  = 0.0614 
##     phi   = 0.9116 
## 
##   Initial states:
##     l = 0.0368 
##     b = 1e-04 
## 
##   sigma:  2e-04
## 
##       AIC      AICc       BIC 
## -8707.374 -8707.274 -8678.974
plot(sbestTRX)

accuracy(forecast(sbestTRX,h=2015), xf)
##                         ME         RMSE          MAE           MPE
## Training set  9.312121e-07 0.0001922326 0.0001326429   0.001785442
## Test set     -7.443357e-03 0.0082708528 0.0074554084 -25.124457938
##                    MAPE      MASE       ACF1 Theil's U
## Training set  0.3712249  1.006251 0.07546002        NA
## Test set     25.1550009 56.557991 0.99170367  45.76398
plot(forecast(sbestTRX))

TRX1 <- forecast(sbestTRX, h=288)
# 14.4. Choosing ARIMA models with auto.arima()
abestTRX <- auto.arima(xt)
abestTRX
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.0942
## s.e.  0.0087
## 
## sigma^2 estimated as 1.461e-07:  log likelihood=84861.43
## AIC=-169718.9   AICc=-169718.9   BIC=-169703.9
summary(abestTRX)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.0942
## s.e.  0.0087
## 
## sigma^2 estimated as 1.461e-07:  log likelihood=84861.43
## AIC=-169718.9   AICc=-169718.9   BIC=-169703.9
## 
## Training set error measures:
##                        ME         RMSE          MAE          MPE     MAPE
## Training set -8.44244e-07 0.0003833821 0.0002107682 -0.003881123 0.401774
##                   MASE          ACF1
## Training set 0.9968202 -0.0002443262
accuracy(forecast(abestTRX,h=2015), xf)
##                         ME         RMSE          MAE           MPE
## Training set -8.442440e-07 0.0003833821 0.0002107682  -0.003881123
## Test set     -6.744442e-03 0.0076650769 0.0067977655 -22.806003862
##                   MAPE       MASE          ACF1 Theil's U
## Training set  0.401774  0.9968202 -0.0002443262        NA
## Test set     22.941872 32.1497662  0.9928932705  42.86464
plot(forecast(abestTRX))

TRX2 <- forecast(abestTRX, h=288)
# 14.5. Write results as CSV
write.csv(TRX2, file = "TRX_predict.csv", row.names = FALSE)


# 15. Bitcoin_Gold BTG forecasting
dataBTG.ts <- ts(data$Bitcoin_Gold)
head(dataBTG.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 181.82 182.10 182.31 181.60 180.88 178.25
# 15.1. Training set 
xt <- window(dataBTG.ts, end=c(13251))
plot(xt, type="l", main="BTG prices b/w 1/17/2018 and 3/12/2018", ylab="BTG Price",xlab="Time", bty="l")

# 15.2. Evaluation set
xf <- window(dataBTG.ts, start=c(13252))
plot(xf, type="l", main="BTG prices b/w 3/12/2018 and 3/23/2018", ylab="BTG Price",xlab="Time", bty="l")

# 15.3. Choosing smoothing models with ets()
sbestBTG <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestBTG
## ETS(A,N,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
## 
##   Initial states:
##     l = 88.7181 
## 
##   sigma:  0.3706
## 
##      AIC     AICc      BIC 
## 3998.394 3998.423 4012.598
plot(sbestBTG)

accuracy(forecast(sbestBTG,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set  -0.004040924  0.3702057  0.2670903  -0.005615326  0.3167011
## Test set     -17.821198555 19.6633481 17.8367854 -28.259556277 28.2776959
##                    MASE       ACF1 Theil's U
## Training set  0.9988239 0.09390921        NA
## Test set     66.7033201 1.00000000  65.01071
plot(forecast(sbestBTG))

BTG1 <- forecast(sbestBTG, h=288)
# 15.4. Choosing ARIMA models with auto.arima()
abestBTG <- auto.arima(xt)
abestBTG
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1141
## s.e.  0.0087
## 
## sigma^2 estimated as 0.6421:  log likelihood=-15841.15
## AIC=31686.3   AICc=31686.3   BIC=31701.29
summary(abestBTG)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.1141
## s.e.  0.0087
## 
## sigma^2 estimated as 0.6421:  log likelihood=-15841.15
## AIC=31686.3   AICc=31686.3   BIC=31701.29
## 
## Training set error measures:
##                        ME      RMSE       MAE          MPE      MAPE
## Training set -0.006441957 0.8037702 0.4420425 -0.006384214 0.3199849
##                   MASE        ACF1
## Training set 0.9968958 0.005157639
accuracy(forecast(abestBTG,h=2015), xf)
##                         ME       RMSE        MAE           MPE       MAPE
## Training set  -0.006441957  0.8037702  0.4420425  -0.006384214  0.3199849
## Test set     -17.484758746 19.3434935 17.5240410 -27.837827013 27.8836293
##                    MASE        ACF1 Theil's U
## Training set  0.9968958 0.005157639        NA
## Test set     39.5202813 1.000000000  63.21644
plot(forecast(abestBTG))

BTG2 <- forecast(abestBTG, h=288)
# 15.5. Write results as CSV
write.csv(BTG2, file = "BTG_predict.csv", row.names = FALSE)


# 16. Ethereum_Classic ETC forecasting
dataETC.ts <- ts(data$Ethereum_Classic)
head(dataETC.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 28.14 28.10 28.15 27.84 27.76 27.43
# 16.1. Training set 
xt <- window(dataETC.ts, end=c(13251))
plot(xt, type="l", main="ETC prices b/w 1/17/2018 and 3/12/2018", ylab="ETC Price",xlab="Time", bty="l")

# 16.2. Evaluation set
xf <- window(dataETC.ts, start=c(13252))
plot(xf, type="l", main="ETC prices b/w 3/12/2018 and 3/23/2018", ylab="ETC Price",xlab="Time", bty="l")

# 16.3. Choosing smoothing models with ets()
sbestETC <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestETC
## ETS(M,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.2448 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 21.4203 
##     b = -0.0744 
## 
##   sigma:  0.0049
## 
##      AIC     AICc      BIC 
## 1926.667 1926.768 1955.068
plot(sbestETC)

accuracy(forecast(sbestETC,h=2015), xf)
##                         ME      RMSE        MAE         MPE       MAPE
## Training set  0.0003739653 0.1102463 0.07553001   0.0015201  0.3406111
## Test set     -3.5265936409 3.9199488 3.52675743 -20.4249401 20.4256919
##                    MASE      ACF1 Theil's U
## Training set  0.9963786 0.1034982        NA
## Test set     46.5243630 0.9957586  42.05279
plot(forecast(sbestETC))

ETC1 <- forecast(sbestETC, h=288)
# 16.4. Choosing ARIMA models with auto.arima()
abestETC <- auto.arima(xt)
abestETC
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.2266
## s.e.  0.0085
## 
## sigma^2 estimated as 0.0182:  log likelihood=7612.52
## AIC=-15221.04   AICc=-15221.04   BIC=-15206.05
summary(abestETC)
## Series: xt 
## ARIMA(1,1,0) 
## 
## Coefficients:
##          ar1
##       0.2266
## s.e.  0.0085
## 
## sigma^2 estimated as 0.0182:  log likelihood=7612.52
## AIC=-15221.04   AICc=-15221.04   BIC=-15206.05
## 
## Training set error measures:
##                         ME      RMSE        MAE          MPE      MAPE
## Training set -0.0004127525 0.1353237 0.08928594 -0.002456444 0.3154507
##                   MASE        ACF1
## Training set 0.9722936 0.003278431
accuracy(forecast(abestETC,h=2015), xf)
##                         ME      RMSE        MAE           MPE       MAPE
## Training set -0.0004127525 0.1353237 0.08928594  -0.002456444  0.3154507
## Test set     -3.0912504631 3.5211157 3.10318584 -17.966485924 18.0216344
##                    MASE        ACF1 Theil's U
## Training set  0.9722936 0.003278431        NA
## Test set     33.7926411 0.996490142  37.74399
plot(forecast(abestETC))

ETC2 <- forecast(abestETC, h=288)
# 16.5. Write results as CSV
write.csv(ETC2, file = "ETC_predict.csv", row.names = FALSE)

# 17. Lisk LSK forecasting
dataLSK.ts <- ts(data$Lisk)
head(dataLSK.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 18.94 18.90 19.08 19.06 19.00 18.78
# 17.1. Training set 
xt <- window(dataLSK.ts, end=c(13251))
plot(xt, type="l", main="LSK prices b/w 1/17/2018 and 3/12/2018", ylab="LSK Price",xlab="Time", bty="l")

# 17.2. Evaluation set
xf <- window(dataLSK.ts, start=c(13252))
plot(xf, type="l", main="LSK prices b/w 3/12/2018 and 3/23/2018", ylab="LSK Price",xlab="Time", bty="l")

# 17.3. Choosing smoothing models with ets()
sbestLSK <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestLSK
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.8827 
##     beta  = 0.0036 
##     phi   = 0.9463 
## 
##   Initial states:
##     l = 15.6464 
##     b = 0.0725 
## 
##   sigma:  0.0907
## 
##      AIC     AICc      BIC 
## 1630.497 1630.598 1658.898
plot(sbestLSK)

accuracy(forecast(sbestLSK,h=2015), xf)
##                        ME      RMSE        MAE          MPE      MAPE
## Training set -0.002511132 0.0904182 0.06627844  -0.01773425  0.443357
## Test set     -2.135049857 2.3870040 2.14027252 -17.51562883 17.550227
##                    MASE         ACF1 Theil's U
## Training set  0.9807339 -0.008793815        NA
## Test set     31.6699938  0.997732907  32.85061
plot(forecast(sbestLSK))

LSK1 <- forecast(sbestLSK, h=288)
# 17.4. Choosing ARIMA models with auto.arima()
abestLSK <- auto.arima(xt)
abestLSK
## Series: xt 
## ARIMA(4,1,0) 
## 
## Coefficients:
##          ar1      ar2     ar3     ar4
##       0.0237  -0.0040  0.0313  0.0543
## s.e.  0.0087   0.0088  0.0095  0.0089
## 
## sigma^2 estimated as 0.02308:  log likelihood=6060.15
## AIC=-12110.3   AICc=-12110.3   BIC=-12072.84
summary(abestLSK)
## Series: xt 
## ARIMA(4,1,0) 
## 
## Coefficients:
##          ar1      ar2     ar3     ar4
##       0.0237  -0.0040  0.0313  0.0543
## s.e.  0.0087   0.0088  0.0095  0.0089
## 
## sigma^2 estimated as 0.02308:  log likelihood=6060.15
## AIC=-12110.3   AICc=-12110.3   BIC=-12072.84
## 
## Training set error measures:
##                         ME      RMSE        MAE          MPE      MAPE
## Training set -0.0003367563 0.1523618 0.09202595 -0.003789225 0.4249497
##                  MASE         ACF1
## Training set 1.001695 -0.002854868
accuracy(forecast(abestLSK,h=2015), xf)
##                         ME      RMSE        MAE           MPE       MAPE
## Training set -0.0003367563 0.1523618 0.09202595  -0.003789225  0.4249497
## Test set     -2.0483681570 2.3095005 2.05844214 -16.872435253 16.9392885
##                   MASE         ACF1 Theil's U
## Training set  1.001695 -0.002854868        NA
## Test set     22.405977  0.996979806  30.13846
plot(forecast(abestLSK))

LSK2 <- forecast(abestLSK, h=288)
# 17.5. Write results as CSV
write.csv(LSK2, file = "LSK_predict.csv", row.names = FALSE)



# 18. Zcash ZEC forecasting
dataZEC.ts <- ts(data$Zcash)
head(dataZEC.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 468.34 467.89 469.04 464.66 464.11 460.67
# 18.1. Training set 
xt <- window(dataZEC.ts, end=c(13251))
plot(xt, type="l", main="ZEC prices b/w 1/17/2018 and 3/12/2018", ylab="ZEC Price",xlab="Time", bty="l")

# 18.2. Evaluation set
xf <- window(dataZEC.ts, start=c(13252))
plot(xf, type="l", main="ZEC prices b/w 3/12/2018 and 3/23/2018", ylab="ZEC Price",xlab="Time", bty="l")

# 18.3. Choosing smoothing models with ets()
sbestZEC <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestZEC
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.9999 
##     beta  = 0.1602 
##     phi   = 0.8 
## 
##   Initial states:
##     l = 331.861 
##     b = 1.1371 
## 
##   sigma:  1.2962
## 
##      AIC     AICc      BIC 
## 6107.158 6107.258 6135.565
plot(sbestZEC)

accuracy(forecast(sbestZEC,h=2015), xf)
##                       ME      RMSE        MAE           MPE       MAPE
## Training set  -0.0188854  1.292347  0.8979156  -0.006187013  0.2925148
## Test set     -58.8283353 64.535142 58.8307267 -24.735904969 24.7366723
##                   MASE       ACF1 Theil's U
## Training set  1.010124 0.02422948        NA
## Test set     66.182499 0.99999866  57.37532
plot(forecast(sbestZEC))

ZEC1 <- forecast(sbestZEC, h=288)
# 18.4. Choosing ARIMA models with auto.arima()
abestZEC <- auto.arima(xt)
abestZEC
## Series: xt 
## ARIMA(5,1,0) 
## 
## Coefficients:
##          ar1     ar2     ar3     ar4     ar5
##       0.1884  0.0313  0.0190  0.0148  0.0169
## s.e.  0.0087  0.0089  0.0089  0.0089  0.0088
## 
## sigma^2 estimated as 2.955:  log likelihood=-25892.35
## AIC=51796.7   AICc=51796.7   BIC=51841.65
summary(abestZEC)
## Series: xt 
## ARIMA(5,1,0) 
## 
## Coefficients:
##          ar1     ar2     ar3     ar4     ar5
##       0.1884  0.0313  0.0190  0.0148  0.0169
## s.e.  0.0087  0.0089  0.0089  0.0089  0.0088
## 
## sigma^2 estimated as 2.955:  log likelihood=-25892.35
## AIC=51796.7   AICc=51796.7   BIC=51841.65
## 
## Training set error measures:
##                        ME     RMSE      MAE          MPE      MAPE
## Training set -0.009371997 1.724046 1.098203 -0.003015154 0.2675438
##                   MASE        ACF1
## Training set 0.9895604 0.002151201
accuracy(forecast(abestZEC,h=2015), xf)
##                         ME      RMSE       MAE           MPE       MAPE
## Training set  -0.009371997  1.724046  1.098203  -0.003015154  0.2675438
## Test set     -51.098256260 57.432442 51.346710 -21.634966772 21.7155720
##                    MASE        ACF1 Theil's U
## Training set  0.9895604 0.002151201        NA
## Test set     46.2671112 0.999998924   49.6687
plot(forecast(abestZEC))

ZEC2 <- forecast(abestZEC, h=288)
# 18.5. Write results as CSV
write.csv(ZEC2, file = "ZEC_predict.csv", row.names = FALSE)


# 19. Tether USDT forecasting
dataUSDT.ts <- ts(data$Tether)
head(dataUSDT.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 1.03 1.03 1.03 1.03 1.03 1.03
# 19.1. Training set 
xt <- window(dataUSDT.ts, end=c(13251))
plot(xt, type="l", main="USDT prices b/w 1/17/2018 and 3/12/2018", ylab="USDT Price",xlab="Time", bty="l")

# 19.2. Evaluation set
xf <- window(dataUSDT.ts, start=c(13252))
plot(xf, type="l", main="USDT prices b/w 3/12/2018 and 3/23/2018", ylab="USDT Price",xlab="Time", bty="l")

# 19.3. Choosing smoothing models with ets()
sbestUSDT <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestUSDT
## ETS(M,N,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.1438 
## 
##   Initial states:
##     l = 0.9995 
## 
##   sigma:  0.0038
## 
##       AIC      AICc       BIC 
## -3692.794 -3692.766 -3678.594
plot(sbestUSDT)

accuracy(forecast(sbestUSDT,h=2015), xf)
##                         ME        RMSE         MAE           MPE      MAPE
## Training set  4.966669e-06 0.003819451 0.002484577 -0.0008477392 0.2475770
## Test set     -4.263405e-04 0.003221195 0.001604810 -0.0436627220 0.1603052
##                   MASE       ACF1 Theil's U
## Training set 0.9394835 0.06522387        NA
## Test set     0.6068206 0.17450018 0.7844779
plot(forecast(sbestUSDT))

USDT1 <- forecast(sbestUSDT, h=288)
# 19.4. Choosing ARIMA models with auto.arima()
abestUSDT <- auto.arima(xt)
abestUSDT
## Series: xt 
## ARIMA(5,1,0) 
## 
## Coefficients:
##           ar1      ar2      ar3      ar4      ar5
##       -0.6270  -0.4884  -0.3470  -0.2280  -0.1345
## s.e.   0.0087   0.0100   0.0105   0.0101   0.0087
## 
## sigma^2 estimated as 1.797e-05:  log likelihood=53205.97
## AIC=-106399.9   AICc=-106399.9   BIC=-106355
summary(abestUSDT)
## Series: xt 
## ARIMA(5,1,0) 
## 
## Coefficients:
##           ar1      ar2      ar3      ar4      ar5
##       -0.6270  -0.4884  -0.3470  -0.2280  -0.1345
## s.e.   0.0087   0.0100   0.0105   0.0101   0.0087
## 
## sigma^2 estimated as 1.797e-05:  log likelihood=53205.97
## AIC=-106399.9   AICc=-106399.9   BIC=-106355
## 
## Training set error measures:
##                         ME        RMSE        MAE         MPE      MAPE
## Training set -6.144416e-06 0.004251507 0.00259713 -0.00208798 0.2590423
##                   MASE         ACF1
## Training set 0.9707495 -0.007291314
accuracy(forecast(abestUSDT,h=2015), xf)
##                         ME        RMSE         MAE         MPE      MAPE
## Training set -6.144416e-06 0.004251507 0.002597130 -0.00208798 0.2590423
## Test set     -1.150941e-04 0.003176375 0.001511596 -0.01251775 0.1509393
##                   MASE         ACF1 Theil's U
## Training set 0.9707495 -0.007291314        NA
## Test set     0.5650012  0.181282449 0.7809183
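# Note: USDT is the only series whose test-set MASE stays below 1 (0.61 and
# 0.57 above), i.e. both models beat a naive last-value forecast out of
# sample; every other coin's test-set MASE is far above 1. This is
# unsurprising for a stablecoin pegged near 1 USD versus freely floating
# prices.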
plot(forecast(abestUSDT))

USDT2 <- forecast(abestUSDT, h=288)
# 19.5. Write results as CSV
write.csv(USDT2, file = "USDT_predict.csv", row.names = FALSE)


# 20. Dogecoin DOGE forecasting
dataDOGE.ts <- ts(data$Dogecoin)
head(dataDOGE.ts)
## Time Series:
## Start = 1 
## End = 6 
## Frequency = 1 
## [1] 0.006842 0.006857 0.006892 0.006868 0.006899 0.006879
# 20.1. Training set 
xt <- window(dataDOGE.ts, end=c(13251))
plot(xt, type="l", main="DOGE prices b/w 1/17/2018 and 3/12/2018", ylab="DOGE Price",xlab="Time", bty="l")

# 20.2. Evaluation set
xf <- window(dataDOGE.ts, start=c(13252))
plot(xf, type="l", main="DOGE prices b/w 3/12/2018 and 3/23/2018", ylab="DOGE Price",xlab="Time", bty="l")

# 20.3. Choosing smoothing models with ets()
sbestDOGE <- ets(xt)
## Warning in ets(xt): Missing values encountered. Using longest contiguous
## portion of time series
sbestDOGE
## ETS(A,Ad,N) 
## 
## Call:
##  ets(y = xt) 
## 
##   Smoothing parameters:
##     alpha = 0.8452 
##     beta  = 0.0088 
##     phi   = 0.9636 
## 
##   Initial states:
##     l = 0.0039 
##     b = 0 
## 
##   sigma:  0
## 
##       AIC      AICc       BIC 
## -12133.25 -12133.15 -12104.84
plot(sbestDOGE)

# Note: the model under test here must be sbestDOGE; the original draft
# evaluated sbestUSDT against the DOGE hold-out by mistake, which produced a
# meaningless test-set MAPE above 28000%, so that output is omitted.
accuracy(forecast(sbestDOGE,h=2015), xf)
plot(forecast(sbestDOGE))

DOGE1 <- forecast(sbestDOGE, h=288)
# 20.4. Choosing ARIMA models with auto.arima()
abestDOGE <- auto.arima(xt)
abestDOGE
## Series: xt 
## ARIMA(0,1,0) 
## 
## sigma^2 estimated as 1.334e-09:  log likelihood=115782.4
## AIC=-231562.8   AICc=-231562.8   BIC=-231555.3
summary(abestDOGE)
## Series: xt 
## ARIMA(0,1,0) 
## 
## sigma^2 estimated as 1.334e-09:  log likelihood=115782.4
## AIC=-231562.8   AICc=-231562.8   BIC=-231555.3
## 
## Training set error measures:
##                         ME         RMSE         MAE         MPE      MAPE
## Training set -2.136158e-07 3.663385e-05 2.42237e-05 -0.00585372 0.4127233
##                   MASE        ACF1
## Training set 0.9978651 -0.03694326
accuracy(forecast(abestDOGE,h=2015), xf)
##                         ME         RMSE         MAE          MPE
## Training set -2.136158e-07 3.663385e-05 2.42237e-05  -0.00585372
## Test set     -5.750789e-04 6.527472e-04 5.79048e-04 -17.06825342
##                    MAPE       MASE        ACF1 Theil's U
## Training set  0.4127233  0.9978651 -0.03694326        NA
## Test set     17.1624076 23.8531594  0.99948768  35.09723
plot(forecast(abestDOGE))

DOGE2 <- forecast(abestDOGE, h=288)
# 20.5. Write results as CSV
write.csv(DOGE2, file = "DOGE_predict.csv", row.names = FALSE)
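# The per-coin sections above repeat the same steps verbatim. A minimal
# consolidation sketch (forecast_coin is a hypothetical helper, not part of
# the original pipeline; it assumes the same 13251/13252 split and the
# forecast package):
forecast_coin <- function(series, symbol, h = 288, eval_h = 2015) {
  x.ts <- ts(series)
  xt <- window(x.ts, end = 13251)     # training window
  xf <- window(x.ts, start = 13252)   # hold-out window
  smodel <- ets(xt)                   # exponential smoothing candidate
  amodel <- auto.arima(xt)            # ARIMA candidate
  print(accuracy(forecast(smodel, h = eval_h), xf))
  print(accuracy(forecast(amodel, h = eval_h), xf))
  pred <- forecast(amodel, h = h)     # the ARIMA forecast is what gets exported
  write.csv(pred, file = paste0(symbol, "_predict.csv"), row.names = FALSE)
  invisible(pred)
}
# e.g. forecast_coin(data$Dogecoin, "DOGE")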