# Implementing an ANN Model from Scratch with NumPy | by Aashi Gupta | June 2023


// Importing the required libraries

```python
import numpy as np
```

// Preparing the dataset

```python
x = np.array([[1, 0, 1, 0], [1, 0, 1, 1], [0, 1, 0, 1]])
y = np.array([[1], [1], [0]])
```

// Defining the activation function

```python
# Activation function
# Here we have used the sigmoid function, which gives an output between 0 and 1
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of the sigmoid, where x is already a sigmoid output
def derivativeSigmoid(x):
    return x * (1 - x)
```
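
One subtlety worth noting: `derivativeSigmoid` expects a value that has already been passed through `sigmoid`, since for s = sigmoid(x) the derivative is s · (1 − s). A quick finite-difference check (an illustrative addition using the two functions just defined, not part of the original walkthrough) confirms this:

```python
# Illustrative sanity check: compare the analytical derivative
# against a central finite difference at a sample point.
x0, eps = 0.5, 1e-6
numerical = (sigmoid(x0 + eps) - sigmoid(x0 - eps)) / (2 * eps)
analytical = derivativeSigmoid(sigmoid(x0))  # pass the sigmoid *output*, not x0
print(numerical, analytical)  # both approximately 0.2350
```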

// Initializing the number of neurons

```python
# input layer - the number of neurons is always equal to the number of columns
inputNeurons = x.shape[1]
# hidden neurons - decided by hit and trial
hiddenNeurons = 3
# output neurons - depends on the number of classes in the target column,
# e.g. one neuron to classify 0 and 1
outputNeurons = 1
```

// Initializing the weights and biases and building the ANN model

```python
# initializing weight and bias matrices for the hidden and output layers randomly
weightsHidden = np.random.uniform(size=(inputNeurons, hiddenNeurons))
biasHidden = np.random.uniform(size=(1, hiddenNeurons))
weightsOutput = np.random.uniform(size=(hiddenNeurons, outputNeurons))
biasOutput = np.random.uniform(size=(1, outputNeurons))
```
```python
# learning rate (by hit and trial)
alpha = 0.04
# number of iterations (by hit and trial)
epochs = 20000

for i in range(epochs):
    # Feedforward propagation
    # Step 1 - apply dot product and add bias: f(x) = x.wh + biasHidden
    fx = np.dot(x, weightsHidden) + biasHidden
    # Step 2 - apply the activation function
    hiddenLayer = sigmoid(fx)
    # Step 3 - apply dot product and add bias: f(x) = hiddenLayer.wout + biasOutput
    fx_ = np.dot(hiddenLayer, weightsOutput) + biasOutput
    # Step 4 - apply activation on the output layer
    outputLayer = sigmoid(fx_)

    # Backpropagation - loss (y - y^) and optimization of weights and biases
    errorOutput = outputLayer - y
    # Slope on the output layer - derivative of the activation function applied to this layer
    slopeOutput = derivativeSigmoid(outputLayer)
    # Delta = error x slope
    deltaOutput = errorOutput * slopeOutput
    # for the hidden layer
    errorHidden = np.dot(deltaOutput, weightsOutput.T)  # .T for taking the transpose
    slopeHidden = derivativeSigmoid(hiddenLayer)
    deltaHidden = errorHidden * slopeHidden

    # updating the weights and biases (weight optimization)
    weightsOutput = weightsOutput - hiddenLayer.T.dot(deltaOutput) * alpha
    weightsHidden = weightsHidden - x.T.dot(deltaHidden) * alpha
    biasOutput = biasOutput - np.sum(deltaOutput, axis=0, keepdims=True) * alpha
    biasHidden = biasHidden - np.sum(deltaHidden, axis=0, keepdims=True) * alpha

print("Output->", outputLayer)
```

Output -> array([[0.98788798], [0.98006967], [0.02688157]])

The rounded predicted output is [1, 1, 0], which is equal to y, i.e. [1, 1, 0]. Hence the predicted output is close to the actual output.
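
To reuse the trained weights on unseen samples, the feedforward pass can be wrapped in a small helper and the sigmoid output rounded to 0 or 1. The `predict` function below is an illustrative sketch, not part of the original walkthrough, and assumes the trained `weightsHidden`, `biasHidden`, `weightsOutput`, and `biasOutput` from above:

```python
# Illustrative helper: repeats the trained network's feedforward pass
# and rounds the sigmoid output to a hard 0/1 class label.
def predict(samples):
    hidden = sigmoid(np.dot(samples, weightsHidden) + biasHidden)
    output = sigmoid(np.dot(hidden, weightsOutput) + biasOutput)
    return np.round(output)

print(predict(x))  # expected: [[1.], [1.], [0.]], matching y
```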