import numpy as np
# two input features: [hours of sleep, hours of study]
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
# one output: expected % score in the exam (labels: marks obtained)
y = np.array(([92], [86], [89]), dtype=float)
X = X / np.amax(X, axis=0)  # normalize each feature by its column-wise maximum
y = y / 100                 # max test score is 100
# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# Derivative of the sigmoid function
def derivatives_sigmoid(x):
    return x * (1 - x)
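
# Note: sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)), which is why this helper
# is applied to activations (sigmoid outputs) below, not raw pre-activations.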
# Variable initialization
epoch = 2000                # number of training iterations
lr = 0.1                    # learning rate
inputlayer_neurons = 2      # number of features in the data set
hiddenlayer_neurons = 3     # number of neurons in the hidden layer
output_neurons = 1          # number of neurons in the output layer
# Random initialization of weights and biases
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))  # input -> hidden weights, 2*3
bh = np.random.uniform(size=(1, hiddenlayer_neurons))                   # hidden layer bias, 1*3
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))    # hidden -> output weights, 3*1
bout = np.random.uniform(size=(1, output_neurons))                      # output layer bias, 1*1
# np.random.uniform draws samples uniformly from [0, 1) in the given shape
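# Shapes for reference: with the 3 training rows above, X is 3*2, the hidden
# activations are 3*3, and the final output is 3*1.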
for i in range(epoch):
    # Forward propagation
    hinp = np.dot(X, wh) + bh             # hidden layer input: dot product + bias
    hlayer_act = sigmoid(hinp)            # hidden layer activation
    outinp = np.dot(hlayer_act, wout) + bout
    output = sigmoid(outinp)              # network prediction

    # Backpropagation
    EO = y - output                       # (Tj - Oj)
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad               # Errj = Oj(1-Oj)(Tj-Oj)
    EH = d_output.dot(wout.T)             # .T means transpose
    # how much the hidden layer weights contributed to the error
    hiddengrad = derivatives_sigmoid(hlayer_act)
    d_hiddenlayer = EH * hiddengrad
    # weight update: dot product of current layer output and next layer error
    wout += hlayer_act.T.dot(d_output) * lr
    wh += X.T.dot(d_hiddenlayer) * lr
    # bias update: sum the deltas over the batch
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" ,output)