# Import the required libraries
import numpy as np
# Prepare the dataset
x = np.array([
[1,0,1,0],
[1,0,1,1],
[0,1,0,1]
])
y = np.array([[1], [1], [0]])
# Define the activation function
# Here we use the sigmoid function, which maps any input to a value between 0 and 1
def sigmoid(x):
return 1/(1+np.exp(-x))
def derivativeSigmoid(x):
return x * (1 - x)
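# Note (added for clarity, not part of the original listing): derivativeSigmoid
# expects the value that sigmoid has *already* produced. If a = sigmoid(z), then
# d(sigmoid)/dz = a * (1 - a), so the function is called on the layer activations
# (hiddenLayer, outputLayer) below, never on the raw pre-activation values.
# Quick sanity check: sigmoid(0.0) == 0.5 and derivativeSigmoid(0.5) == 0.25,
# which is the sigmoid's maximum slope.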
# Initialize the number of neurons in each layer
# input layer - the number of neurons always equals the number of feature columns
inputNeurons = x.shape[1]
# hidden neurons - chosen by trial and error
hiddenNeurons = 3
# output neurons - depends on the number of classes in the target column, e.g. one neuron to classify 0 vs. 1
outputNeurons = 1
# Initialize the weights and biases and build the ANN model
# randomly initialize the weight and bias matrices for the hidden and output layers
weightsHidden = np.random.uniform(size=(inputNeurons, hiddenNeurons))
biasHidden = np.random.uniform(size=(1, hiddenNeurons))
weightsOutput = np.random.uniform(size=(hiddenNeurons, outputNeurons))
biasOutput = np.random.uniform(size=(1, outputNeurons))
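# For illustration (not in the original listing): with 4 input, 3 hidden and
# 1 output neuron, the resulting shapes are weightsHidden (4, 3),
# biasHidden (1, 3), weightsOutput (3, 1) and biasOutput (1, 1).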
# learning rate (chosen by trial and error)
alpha = 0.04
# number of iterations (chosen by trial and error)
epochs = 20000

for i in range(epochs):
    # Feedforward propagation
    # Step 1 - apply dot product and add bias: f(x) = x.wh + biasHidden
    fx = np.dot(x, weightsHidden) + biasHidden
    # Step 2 - apply the activation function
    hiddenLayer = sigmoid(fx)
    # Step 3 - apply dot product and add bias: f(x) = hiddenLayer.wout + biasOutput
    fx_ = np.dot(hiddenLayer, weightsOutput) + biasOutput
    # Step 4 - apply the activation function on the output layer
    outputLayer = sigmoid(fx_)

    # Backpropagation - error between prediction and target, then optimization of weights and biases
    errorOutput = outputLayer - y
    # Slope at the output layer - derivative of the activation function applied to this layer's output
    slopeOutput = derivativeSigmoid(outputLayer)
    # Delta = error x slope
    deltaOutput = errorOutput * slopeOutput
    # same steps for the hidden layer
    errorHidden = np.dot(deltaOutput, weightsOutput.T)  # .T takes the transpose
    slopeHidden = derivativeSigmoid(hiddenLayer)
    deltaHidden = errorHidden * slopeHidden

    # updating the weights (weight optimization)
    weightsOutput = weightsOutput - hiddenLayer.T.dot(deltaOutput) * alpha
    weightsHidden = weightsHidden - x.T.dot(deltaHidden) * alpha
    biasOutput = biasOutput - np.sum(deltaOutput, axis=0, keepdims=True) * alpha
    biasHidden = biasHidden - np.sum(deltaHidden, axis=0, keepdims=True) * alpha

print("Output->", outputLayer)
Output -> array([[0.98788798], [0.98006967], [0.02688157]])
The rounded predicted output is [1, 1, 0], which equals the target y, i.e. [1, 1, 0]. The predicted output is therefore close to the actual output.
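To make that comparison concrete, here is a minimal sketch (not part of the original listing) that rounds the raw sigmoid outputs to 0/1 class labels and checks them against the targets; it assumes the outputLayer and y arrays produced by the training code above, whose values will vary slightly with the random initialization.

# Illustrative only: round the sigmoid outputs to class labels and compare with y
predictions = np.round(outputLayer)
print("Predicted classes:", predictions.ravel())              # -> [1. 1. 0.]
print("Matches target y:", np.array_equal(predictions, y))    # -> True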