# -*- coding: utf-8 -*-
"""Mine Regression ANN
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17R9ApvL9gl3lPHzTgHOoAKQ06BCDJUKx
# Artificial Neural Network
## Importing the libraries
"""
import numpy as np
import tensorflow as tf
import pandas as pd
# NumPy, TensorFlow, and pandas are needed to build and train the ANN model
"""## Part 1 - Data Preprocessing
### Importing the dataset
"""
dataset = pd.read_csv("transcritical.csv")
# read_csv loads all the values from the file into a pandas DataFrame named dataset
X = dataset.iloc[:,:-1].values
# independent variables (input features): every column except the last
y = dataset.iloc[:,-1].values
# dependent variable: COP, the last column
print(y)
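# Optional sanity check on the loaded data (a minimal sketch, not part of the
# original pipeline; it assumes the CSV has a header row and only numeric columns):
print(dataset.shape)         # number of samples and columns
print(dataset.isna().sum())  # missing values per column, should all be 0 before training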
"""### Splitting the dataset into the Training set and Test set"""
# this keeps some data separate so the model can be evaluated on data it has never seen
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X , y, test_size = 0.2, random_state = 0)
# four arrays are created: training and test splits of the features (X) and of the target (y)
"""###Feature scaling"""
#this is done in order to bring all the features on par with each other so that model doesn't discriminate
# (value - mean)/ Standard Deviation
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()      # scaler for the input features
sc_y = StandardScaler()    # separate scaler for the target, kept so predictions can be un-scaled later
X_train = sc.fit_transform(X_train)   # fit the scaler on the training data only
X_test = sc.transform(X_test)         # reuse the training statistics on the test set
y_train = sc_y.fit_transform(y_train.reshape(len(y_train), 1))
y_test = sc_y.transform(y_test.reshape(len(y_test), 1))
print(X_train)
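# Quick check of the standardization (a sketch, optional): each scaled training
# feature should now have roughly zero mean and unit standard deviation,
# matching z = (value - mean) / standard deviation applied column by column.
print(X_train.mean(axis=0))  # approximately 0 for every feature
print(X_train.std(axis=0))   # approximately 1 for every feature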
"""## Part 2 - Building the ANN
### Initializing the ANN
"""
ann = tf.keras.models.Sequential()
"""### Adding the input layer and the first hidden layer"""
ann.add(tf.keras.layers.Dense(units = 6, activation= "relu"))
# ReLU (rectified linear unit) activation introduces non-linearity between the input and the first hidden layer
"""### Adding the second hidden layer"""
ann.add(tf.keras.layers.Dense(units = 6, activation= "relu"))
"""### Adding the output layer"""
ann.add(tf.keras.layers.Dense(units = 1, activation="linear"))
# for continuous prediction (regression) the output layer uses no activation, i.e. the linear activation
"""## Part 3 - Training the ANN
### Compiling the ANN
"""
ann.compile(optimizer="adam", loss = "mean_squared_logarithmic_error", metrics=['mse'])
# the Adam optimizer updates the network weights from the gradients computed during backpropagation
# note: the targets were standardized and can be negative, so plain mean_squared_error is usually a safer loss than MSLE here
"""### Training the ANN model on the Training set"""
ann.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=32, epochs=100)
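# Optional check after training (a sketch): evaluate() reports the compiled loss
# and metrics (here MSLE and MSE) on the scaled test set in one call.
print(ann.evaluate(X_test, y_test, verbose=0))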
"""### Predicting the results of the Test set"""
y_pred = ann.predict(X_test)
print(y_pred)
# undo the target scaling so both arrays are back in the original COP units
y_test = sc_y.inverse_transform(y_test.reshape(len(y_test), 1))
y_pred = sc_y.inverse_transform(y_pred.reshape(len(y_pred), 1))
np.set_printoptions(precision=2)
print(np.concatenate((y_test, y_pred), 1))  # actual COP next to predicted COP
from sklearn.metrics import r2_score
print(r2_score(y_test, y_pred))  # coefficient of determination of the test-set predictions
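# Using the trained model on a single new operating point (a sketch): raw inputs
# must go through the same feature scaler before predict(), and the output through
# the inverse target scaler. Here the first test sample is un-scaled to stand in
# for a freshly measured point, so the number of features is guaranteed to match.
raw_point = sc.inverse_transform(X_test[:1])  # stand-in for new, unscaled measurements
cop_pred = sc_y.inverse_transform(ann.predict(sc.transform(raw_point)))
print(cop_pred)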