import unittest

import numpy as np

from neural_net.activation_layers.relu_layer import ReluLayer


# noinspection PyMethodMayBeStatic
class ReluLayerTests(unittest.TestCase):

    def test_relu_layer_1x1(self):
        ##############
        #  Arrange   #
        ##############
        inputs = np.array([[1.0]])
        weights = np.array([[0.5]])
        biases = np.array([0.0])
        learning_rate = 0.001

        # Pre-activation value (z)
        # This is the intermediate value calculated as the weighted sum of inputs plus the bias.
        z = np.dot(inputs, weights) + biases

        # ReLU activation: f(z) = max(0, z)
        # The expected output after applying the ReLU activation function
        expected_output = np.maximum(0, z)
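        # For these specific values: z = 1.0 * 0.5 + 0.0 = 0.5, so expected_output = max(0, 0.5) = 0.5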

        # Loss gradient dL/dout
        # Represents how much the loss changes when the output changes.
        dL_dout = np.array([[1.0]])

        # Activation derivative dout/dz
        # For ReLU: if z > 0, dout/dz = 1; otherwise, dout/dz = 0
        dout_dz = np.where(z > 0, 1.0, 0.0)
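        # Here z = 0.5 > 0, so dout_dz = 1.0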

        # Gradient of the loss with respect to the weights (dL/dweights)
        # This represents how much the loss changes when the weights change.
        # Formula (1x1 case): dL/dweights = inputs * dL/dout * ReLU'(z)
        expected_dL_dweights = inputs * dL_dout * dout_dz

        # Gradient of the loss with respect to the bias (dL/dbias)
        expected_dL_dbias = np.sum(dL_dout * dout_dz)
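        # For these values: dL/dweights = 1.0 * 1.0 * 1.0 = 1.0 and dL/dbias = 1.0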

        # Gradient of the loss with respect to the inputs (dL/dinputs)
        # This is needed if you want to backpropagate further into earlier layers.
        # Formula (1x1 case): dL/dinputs = dL/dout * ReLU'(z) * weights
        expected_dL_dinputs = dL_dout * dout_dz * weights
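        # For these values: dL/dinputs = 1.0 * 1.0 * 0.5 = 0.5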

        # Calculate expected new weights and biases after one gradient-descent step
        expected_weights = weights - learning_rate * expected_dL_dweights
        expected_biases = biases - learning_rate * expected_dL_dbias
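        # Expected values: weights = 0.5 - 0.001 * 1.0 = 0.499, biases = 0.0 - 0.001 * 1.0 = -0.001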

        # Initialize ReluLayer
        layer = ReluLayer(weights.shape[0], weights.shape[1], weights=weights, biases=biases)
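        # Note: the first two constructor arguments are assumed here to be the layer's
        # input and output dimensions, matching how these tests instantiate ReluLayer.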

        ##############
        #    Act     #
        ##############
        # Forward pass
        output = layer.forward(inputs)

        # Backward pass
        dL_dinputs = layer.backward(dL_dout, learning_rate)
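        # backward() is expected to return dL/dinputs and to update layer.weights and
        # layer.biases in place using learning_rate; the assertions below verify both.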

        ##############
        #   Assert   #
        ##############
        # Forward output correctness
        self.assertTrue(np.allclose(output, expected_output, atol=1e-6),
                        f"Forward output incorrect: Actual: {output}, Expected: {expected_output}")

        # Backward pass correctness
        self.assertTrue(np.allclose(dL_dinputs, expected_dL_dinputs, atol=1e-6),
                        f"Inputs derivative incorrect: Actual: {dL_dinputs}, Expected: {expected_dL_dinputs}")
        self.assertTrue(np.allclose(layer.weights, expected_weights, atol=1e-6),
                        f"Weight update incorrect: Actual: {layer.weights}, Expected: {expected_weights}")
        self.assertTrue(np.allclose(layer.biases, expected_biases, atol=1e-6),
                        f"Bias update incorrect: Actual: {layer.biases}, Expected: {expected_biases}")

    def test_relu_layer_2x2(self):
        ##############
        #  Arrange   #
        ##############
        inputs = np.array([[1.0, 2.0],
                           [3.0, 4.0]])  # 2x2 input matrix

        weights = np.array([[0.5, 0.2],
                            [0.3, 0.7]])  # 2x2 weight matrix

        biases = np.array([0.1, -0.1])  # 2 biases, one for each neuron

        learning_rate = 0.001  # Learning rate for weight updates

        # Pre-activation value (z)
        # z = inputs.dot(weights) + biases
        z = np.dot(inputs, weights) + biases

        # Expected output using the ReLU activation function
        expected_output = np.maximum(0, z)  # Apply ReLU
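        # For these values: z = [[1.2, 1.5], [2.8, 3.3]]; every entry is positive, so the ReLU output equals z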

        # Loss gradient dL/dout (assuming a gradient of 1 for simplicity)
        dL_dout = np.array([[1.0, 1.0],
                            [1.0, 1.0]])

        # Activation derivative dout/dz
        # For ReLU: dout/dz = 1 where z > 0, and dout/dz = 0 where z <= 0
        dout_dz = np.where(z > 0, 1.0, 0.0)
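        # All entries of z are positive here, so dout_dz is a 2x2 matrix of ones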

        # Expected gradients (for backpropagation)
        # Expected gradients with respect to weights
        expected_dL_dweights = np.dot(inputs.T, dL_dout * dout_dz)
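        # For these values: dL/dweights = inputs.T @ ones((2, 2)) = [[4.0, 4.0], [6.0, 6.0]]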

        # Expected gradients with respect to biases
        expected_dL_dbias = np.sum(dL_dout * dout_dz, axis=0)
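        # For these values: dL/dbias = column sums of ones((2, 2)) = [2.0, 2.0]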

        # Expected gradients with respect to inputs
        expected_dL_dinputs = np.dot(dL_dout * dout_dz, weights.T)
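        # For these values: dL/dinputs = ones((2, 2)) @ weights.T = [[0.7, 1.0], [0.7, 1.0]]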

        # Expected updated weights and biases after backpropagation
        expected_weights = weights - learning_rate * expected_dL_dweights
        expected_biases = biases - learning_rate * expected_dL_dbias
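        # Expected values: weights = [[0.496, 0.196], [0.294, 0.694]], biases = [0.098, -0.102]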

        # Initialize the ReLU layer
        layer = ReluLayer(weights.shape[0], weights.shape[1], weights=weights, biases=biases)

        ##############
        #    Act     #
        ##############
        # Forward pass
        output = layer.forward(inputs)

        # Backward pass
        dL_dinputs = layer.backward(dL_dout, learning_rate)

        ##############
        #   Assert   #
        ##############
        # Forward output correctness
        self.assertTrue(np.allclose(output, expected_output, atol=1e-6),
                        f"Forward output incorrect: Actual: {output}, Expected: {expected_output}")

        # Backward pass correctness (for input gradients)
        self.assertTrue(np.allclose(dL_dinputs, expected_dL_dinputs, atol=1e-6),
                        f"Inputs derivative incorrect: Actual: {dL_dinputs}, Expected: {expected_dL_dinputs}")

        # Check weight updates
        self.assertTrue(np.allclose(layer.weights, expected_weights, atol=1e-6),
                        f"Weight update incorrect: Actual: {layer.weights}, Expected: {expected_weights}")

        # Check bias updates
        self.assertTrue(np.allclose(layer.biases, expected_biases, atol=1e-6),
                        f"Bias update incorrect: Actual: {layer.biases}, Expected: {expected_biases}")