-
Notifications
You must be signed in to change notification settings - Fork 20
/
Copy pathpolyreg.py
109 lines (81 loc) · 2.87 KB
/
polyreg.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
# -*- coding: utf-8 -*-
"""PolyReg.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1q4b1o3iFGk4vqJM4KLE7T2ClDhJo_QC8
"""
# Core data-handling, numeric, and plotting stack for the script.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import requests
from pandas.plotting import register_matplotlib_converters
# Teach matplotlib to render pandas datetime types on plot axes.
register_matplotlib_converters()
import warnings
# Silence all library warnings so the console output stays readable.
warnings.filterwarnings("ignore")
# Fetching data from the server
# Daily BTC/USD OHLCV history from the (unofficial) CoinMarketCap web API.
# time_start/time_end are Unix epoch seconds (2013-04-28 .. 2020-10-01).
url = "https://web-api.coinmarketcap.com/v1/cryptocurrency/ohlcv/historical"
param = {"convert":"USD","slug":"bitcoin","time_end":"1601510400","time_start":"1367107200"}
content = requests.get(url=url, params=param).json()
# Flatten the nested JSON quote records into one row per day.
df = pd.json_normalize(content['data']['quotes'])
# Extracting and renaming the important variables
# Drop the timezone so all datetimes are naive and comparable.
df['Date']=pd.to_datetime(df['quote.USD.timestamp']).dt.tz_localize(None)
df['Low'] = df['quote.USD.low']
df['High'] = df['quote.USD.high']
df['Open'] = df['quote.USD.open']
df['Close'] = df['quote.USD.close']
df['Volume'] = df['quote.USD.volume']
# Drop original and redundant columns
df=df.drop(columns=['time_open','time_close','time_high','time_low', 'quote.USD.low', 'quote.USD.high', 'quote.USD.open', 'quote.USD.close', 'quote.USD.volume', 'quote.USD.market_cap', 'quote.USD.timestamp'])
# Creating a new feature for better representing day-wise values
# Mid-range price: midpoint of the daily low and high.
df['Mean'] = (df['Low'] + df['High'])/2
# Cleaning the data for any NaN or Null fields
df = df.dropna()
# Creating a copy for making small changes
dataset_for_prediction = df.copy()
# NOTE(review): shift() makes 'Actual' the PREVIOUS row's Mean — presumably
# a one-day-lagged target for forecasting; confirm the intended semantics.
dataset_for_prediction['Actual']=dataset_for_prediction['Mean'].shift()
# The shift leaves a NaN in the first row; drop it.
dataset_for_prediction=dataset_for_prediction.dropna()
# date time typecast
dataset_for_prediction['Date'] =pd.to_datetime(dataset_for_prediction['Date'])
# Index by date so the frame is time-series addressable.
dataset_for_prediction.index= dataset_for_prediction['Date']
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

# Number of daily samples used for fitting, and the size of the
# evaluation window taken from the end of the series.
# NOTE(review): with N == total number of days, the last TEST_SIZE rows are
# inside the training window (data leakage). Kept for parity with the
# original notebook — confirm whether N should be len(X) - TEST_SIZE.
N = 2411
TEST_SIZE = 272

# Feature is simply the day index 0..N-1, as an (N, 1) column vector.
X = np.arange(N).reshape(-1, 1)
Xtrain = X[:N]
Xtest = X[-TEST_SIZE:]

# Target is the day-wise mid-range price computed earlier.
Y = np.array(df["Mean"], dtype='float32')
ytrain = Y[:N]
ytest = Y[-TEST_SIZE:]

# Plot the actual test-window values once; per-degree predictions overlay it.
plt.plot(ytest, label='actual')

# Grid search over candidate polynomial degrees.
for j in [2, 3, 5]:
    # Expand the day index into polynomial features of degree j,
    # then fit an ordinary linear regression on the expanded features.
    poly = PolynomialFeatures(degree=j)
    X_poly = poly.fit_transform(Xtrain)
    reg = LinearRegression()
    reg.fit(X_poly, ytrain)
    # Use transform (not fit_transform) so the test inputs go through
    # the feature map fitted on the training inputs.
    ypred = reg.predict(poly.transform(Xtest))
    plt.plot(ypred, label='predicted with degree'+str(j))
    plt.legend()
    #plt.show()
    print("POLYNOMIAL REGRESSION")
    # Vectorized RMSE over the test window (scalar, not a 272x1 array).
    rmse = np.sqrt(np.mean((ypred - ytest) ** 2))
    print("Degree=", j, " RMSE:", rmse)
print()
print("POLYNOMIAL REGRESSION, depending upon no of days")