- import unittest
-
- import numpy as np
-
- from neural_net.mnist import MNISTNeuralNet
- from neural_net.functions.loss import cross_entropy_loss
-
-
- # noinspection PyMethodMayBeStatic
- class MNISTNeuralNetTests(unittest.TestCase):
-
-     def test_loss(self):
-         mnist = MNISTNeuralNet()
-         # Sample predictions and labels for testing the loss function
-         predictions = np.array([[0.1, 0.2, 0.7],  # softmax-style outputs (rows sum to 1)
-                                 [0.2, 0.6, 0.2]])
-
-         # Corresponding labels as class indices (not one-hot)
-         labels = np.array([2, 1])
-
-         # Expected loss, computed with the standalone cross_entropy_loss function;
-         # this checks that mnist.loss agrees with it
-         expected_loss = cross_entropy_loss(predictions, labels)
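-         # Sanity check by hand (assuming cross_entropy_loss averages the negative
-         # natural log of each correct-class probability over the batch):
-         # -(ln(0.7) + ln(0.6)) / 2 ≈ (0.35667 + 0.51083) / 2 ≈ 0.43375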
-
-         # Call the loss function
-         computed_loss = mnist.loss(predictions, labels)
-
-         # Assert that the computed loss matches the expected loss
-         self.assertAlmostEqual(computed_loss, expected_loss, places=5, msg="Loss function is incorrect")
-
-     def test_derivative_loss(self):
-         mnist = MNISTNeuralNet()
-         # Sample predictions and labels for testing the derivative of the loss function
-         predictions = np.array([[0.1, 0.2, 0.7],  # softmax-style outputs (rows sum to 1)
-                                 [0.2, 0.6, 0.2]])
-
-         # Corresponding labels as class indices (not one-hot)
-         labels = np.array([2, 1])
-
-         # Expected derivative: for softmax outputs with cross-entropy loss, the
-         # gradient with respect to the logits is predictions - one_hot(labels)
-         expected_derivative = np.array([[0.1, 0.2, -0.3],
-                                         [0.2, -0.4, 0.2]])
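-         # Worked check (assuming loss_derivative returns per-sample gradients
-         # without batch averaging):
-         # row 0: [0.1, 0.2, 0.7] - [0, 0, 1] = [0.1, 0.2, -0.3]
-         # row 1: [0.2, 0.6, 0.2] - [0, 1, 0] = [0.2, -0.4, 0.2]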
-
-         # Call the derivative loss function
-         computed_derivative = mnist.loss_derivative(predictions, labels)
-
-         # Assert that the computed derivative matches the expected derivative
-         np.testing.assert_array_almost_equal(computed_derivative, expected_derivative, decimal=5,
-                                              err_msg="Derivative of loss function is incorrect")
-
-     def test_derivative_loss2(self):
-         mnist = MNISTNeuralNet()
-
-         # Given outputs
-         outputs = np.array([
-             [0.06873367, 0.043651, 0.043651, 0.05235898, 0.043651, 0.043651,
-              0.043651, 0.043651, 0.0563062, 0.043651],
-             [0.043651, 0.043651, 0.05704588, 0.0551587, 0.05460022, 0.043651,
-              0.043651, 0.043651, 0.07723706, 0.05474726]
-         ])
-
-         # Labels as class indices, matching the other tests
-         labels = np.array([7, 2])
-         num_classes = 10
-
-         # Convert labels to one-hot encoding
-         labels_one_hot = np.zeros((len(labels), num_classes))
-         for i, label in enumerate(labels):
-             labels_one_hot[i, label] = 1
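-         # Equivalent vectorized form:
-         # labels_one_hot[np.arange(len(labels)), labels] = 1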
-
-         # Expected loss derivative via the softmax + cross-entropy identity:
-         # dL/dz = outputs - one_hot(labels)
-         expected_loss_derivative = outputs - labels_one_hot
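-         # Spot check, row 0 (label 7): column 7 becomes 0.043651 - 1 = -0.956349;
-         # every other column keeps its output value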
-
-         # Call the derivative loss function
-         computed_loss_derivative = mnist.loss_derivative(outputs, labels)
-
-         # Assert that the computed derivative matches the expected derivative
-         np.testing.assert_array_almost_equal(computed_loss_derivative, expected_loss_derivative, decimal=5,
-                                              err_msg="Derivative of loss function is incorrect")