Build an Artificial Neural Network by implementing the Backpropagation algorithm and test the same using appropriate data sets.
Backpropagation algorithm->
The algorithm trains the network by repeating three steps for every training row: forward-propagate the inputs to compute the outputs, back-propagate the output error to obtain a delta for each neuron, and update every weight by the learning rate times the neuron's delta times its input.
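In symbols, these are the standard delta-rule updates that the program below implements (a sketch in conventional notation, not part of the original listing), where o_j is a neuron's sigmoid output, t_j the target, and \eta the learning rate:

\delta_j = (t_j - o_j)\,o_j(1 - o_j)             (output layer)
\delta_j = o_j(1 - o_j)\sum_k w_{jk}\,\delta_k   (hidden layer)
w_{ij} \leftarrow w_{ij} + \eta\,\delta_j\,o_i   (weight update; the bias term uses o_i = 1)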
Program->
from math import exp
from random import seed
from random import random

# Initialize a network with one hidden layer and one output layer.
# Each neuron is a dict of weights; the last weight is the bias.
def initialize_network(n_inputs, n_hidden, n_outputs):
    network = list()
    hidden_layer = [{'weights': [random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
    network.append(hidden_layer)
    output_layer = [{'weights': [random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
    network.append(output_layer)
    return network

# Calculate a neuron's activation: weighted sum of inputs plus the bias.
def activate(weights, inputs):
    activation = weights[-1]
    for i in range(len(weights) - 1):
        activation += weights[i] * inputs[i]
    return activation

# Sigmoid transfer function.
def transfer(activation):
    return 1.0 / (1.0 + exp(-activation))

# Forward-propagate an input row through the network, layer by layer.
def forward_propagate(network, row):
    inputs = row
    for layer in network:
        new_inputs = []
        for neuron in layer:
            activation = activate(neuron['weights'], inputs)
            neuron['output'] = transfer(activation)
            new_inputs.append(neuron['output'])
        inputs = new_inputs
    return inputs

# Derivative of the sigmoid, written in terms of the neuron's output.
def transfer_derivative(output):
    return output * (1.0 - output)

# Back-propagate the error and store a 'delta' in every neuron.
def backward_propagate_error(network, expected):
    for i in reversed(range(len(network))):
        layer = network[i]
        errors = list()
        if i != len(network) - 1:
            # Hidden layer: error is the delta-weighted sum from the next layer.
            for j in range(len(layer)):
                error = 0.0
                for neuron in network[i + 1]:
                    error += (neuron['weights'][j] * neuron['delta'])
                errors.append(error)
        else:
            # Output layer: error is the difference from the expected output.
            for j in range(len(layer)):
                neuron = layer[j]
                errors.append(expected[j] - neuron['output'])
        for j in range(len(layer)):
            neuron = layer[j]
            neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])

# Update weights using the stored deltas (stochastic gradient descent).
def update_weights(network, row, l_rate):
    for i in range(len(network)):
        inputs = row[:-1]
        if i != 0:
            inputs = [neuron['output'] for neuron in network[i - 1]]
        for neuron in network[i]:
            for j in range(len(inputs)):
                neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
            neuron['weights'][-1] += l_rate * neuron['delta']

# Train for a fixed number of epochs, reporting sum squared error per epoch.
def train_network(network, train, l_rate, n_epoch, n_outputs):
    for epoch in range(n_epoch):
        sum_error = 0
        for row in train:
            outputs = forward_propagate(network, row)
            expected = [0 for i in range(n_outputs)]
            expected[row[-1]] = 1  # one-hot encode the class label
            sum_error += sum([(expected[i] - outputs[i]) ** 2 for i in range(len(expected))])
            backward_propagate_error(network, expected)
            update_weights(network, row, l_rate)
        print('>epoch=%d,lrate=%.3f,error=%.3f' % (epoch, l_rate, sum_error))

# Test the training on a small two-class dataset; the last column is the label.
seed(1)
dataset = [[2.7810836, 2.550537003, 0],
           [1.465489372, 2.362125076, 0],
           [3.396561688, 4.400293529, 0],
           [1.38807019, 1.850220317, 0],
           [3.06407232, 3.005305973, 0],
           [7.627531214, 2.759262235, 1],
           [5.332441248, 2.088626775, 1],
           [6.922596716, 1.77106367, 1],
           [8.675418651, -0.242068655, 1],
           [7.673756466, 3.508563011, 1]]
n_inputs = len(dataset[0]) - 1
n_outputs = len(set([row[-1] for row in dataset]))
network = initialize_network(n_inputs, 2, n_outputs)
train_network(network, dataset, 0.5, 20, n_outputs)
print(network)
for layer in network:
    print(layer)
Output->
>epoch=0,lrate=0.500,error=-0.857
>epoch=1,lrate=0.500,error=1.759
>epoch=2,lrate=0.500,error=3.443
>epoch=3,lrate=0.500,error=4.262
>epoch=4,lrate=0.500,error=4.624
>epoch=5,lrate=0.500,error=4.784
>epoch=6,lrate=0.500,error=4.856
>epoch=7,lrate=0.500,error=4.888
>epoch=8,lrate=0.500,error=4.902
>epoch=9,lrate=0.500,error=4.909
>epoch=10,lrate=0.500,error=4.912
>epoch=11,lrate=0.500,error=4.913
>epoch=12,lrate=0.500,error=4.914
>epoch=13,lrate=0.500,error=4.914
>epoch=14,lrate=0.500,error=4.914
>epoch=15,lrate=0.500,error=4.914
>epoch=16,lrate=0.500,error=4.914
>epoch=17,lrate=0.500,error=4.914
>epoch=18,lrate=0.500,error=4.914
>epoch=19,lrate=0.500,error=4.914
[[{'weights': [0.13436424411240122, 0.8474337369372327, 0.763774618976614], 'output': 0.99157530623528, 'delta': -0.00016301917329962238}, {'weights': [0.2550690257394217, 0.49543508709194095, 0.4494910647887381], 'output': 0.9844051047537846, 'delta': -0.0009816034444098982}], [{'weights': [0.1352957155754806, -0.06993695351348819, -0.5152093452497912], 'output': 0.42656544528564916, 'delta': -0.10434105007212165}, {'weights': [-0.38274067993191285, 0.7914341824323032, 0.07234040575369743], 'output': 0.5794832633351258, 'delta': 0.10247253219310253}]]
[{'weights': [0.13436424411240122, 0.8474337369372327, 0.763774618976614], 'output': 0.99157530623528, 'delta': -0.00016301917329962238}, {'weights': [0.2550690257394217, 0.49543508709194095, 0.4494910647887381], 'output': 0.9844051047537846, 'delta': -0.0009816034444098982}]
[{'weights': [0.1352957155754806, -0.06993695351348819, -0.5152093452497912], 'output': 0.42656544528564916, 'delta': -0.10434105007212165}, {'weights': [-0.38274067993191285, 0.7914341824323032, 0.07234040575369743], 'output': 0.5794832633351258, 'delta': 0.10247253219310253}]
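Once trained, the network can classify a new row by taking the index of the largest output neuron. A minimal sketch, assuming the network and dataset from the program above (the predict helper is an illustration added here, not part of the original listing):

# Hypothetical helper: classify a row with the trained network by
# taking the arg-max of the forward pass.
def predict(network, row):
    outputs = forward_propagate(network, row)
    return outputs.index(max(outputs))

# Usage: compare predictions against the training labels.
for row in dataset:
    prediction = predict(network, row)
    print('expected=%d, predicted=%d' % (row[-1], prediction))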