from neural_net.epoch import Epoch
from neural_net.neural_net import NeuralNet, ModelData


class NeuralNetTrainer:
    """Drives training of a NeuralNet over the training set in a ModelData."""
    def __init__(self, neural_net: NeuralNet, model_data: ModelData, learning_rate: float, batch_size: int):
        self.neural_net = neural_net
        self.model_data = model_data
        self.is_running = False
        self.epoch_history = []
        self.learning_rate = learning_rate
        self.batch_size = batch_size

    def set_learning_rate(self, learning_rate: float):
        self.learning_rate = learning_rate

    def set_batch_size(self, batch_size: int):
        self.batch_size = batch_size

    def run_epoch(self) -> Epoch:
        epoch = Epoch(len(self.epoch_history),
                      self.model_data.training_inputs,
                      self.model_data.training_labels,
                      self.learning_rate,
                      self.batch_size)
        self._train_one_epoch(epoch)
        return epoch

    def start(self, on_epoch_finish=None, on_finish=None):
        """Train epoch after epoch until stop() is called.

        stop() may be called from on_epoch_finish or from another thread.
        """
        self.is_running = True
        while self.is_running:
            # Perform one epoch of training.
            # In the future, a learning-rate schedule may be applied here.
            epoch = self.run_epoch()

            if on_epoch_finish is not None:
                on_epoch_finish(epoch)

        # stop() was called, ending the training loop.
        if on_finish is not None:
            on_finish()
        self.stop()

    def stop(self):
        self.is_running = False

    def _train_one_epoch(self, epoch: Epoch):
        epoch.start()

        for batch in epoch.batches:
            # Forward pass, then the derivative of the loss w.r.t. the outputs.
            batch.predictions = self.neural_net.forward(batch.inputs)
            dL_dout = self.neural_net.loss_derivative(batch.predictions, batch.labels)

            # Backward pass. The returned weight/bias snapshots are not needed
            # here, so they are unpacked into underscore-prefixed names.
            layer_dl_gradients, layer_dl_biases, _layer_weights, _layer_biases = \
                self.neural_net.backward(dL_dout, epoch)
            epoch.layer_dl_gradients.append(layer_dl_gradients)
            epoch.layer_dl_biases.append(layer_dl_biases)

        epoch.finish(self.neural_net)
        epoch.loss = self.neural_net.loss(epoch.all_predictions(), epoch.all_labels())
        self.epoch_history.append(epoch)
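

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). The NeuralNet and ModelData construction
# below is hypothetical: their real constructors live in neural_net.neural_net
# and may take different arguments. The per-epoch callback shows one way a
# learning-rate schedule could be layered on set_learning_rate(), in the
# spirit of the note in start(); the 0.95 decay factor and the 10-epoch stop
# condition are arbitrary choices for the example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    # Hypothetical constructor arguments; adjust to the actual signatures.
    net = NeuralNet(layer_sizes=[784, 128, 10])
    data = ModelData(np.random.rand(1000, 784), np.random.rand(1000, 10))

    trainer = NeuralNetTrainer(net, data, learning_rate=0.1, batch_size=32)

    def on_epoch_finish(epoch: Epoch):
        # Exponential decay: shrink the learning rate by 5% each epoch.
        trainer.set_learning_rate(trainer.learning_rate * 0.95)
        # epoch.loss is set by _train_one_epoch before this callback fires.
        print(f"epoch {len(trainer.epoch_history)}: loss={epoch.loss:.4f}")
        # stop() clears is_running, so start() falls out of its loop.
        if len(trainer.epoch_history) >= 10:
            trainer.stop()

    trainer.start(on_epoch_finish=on_epoch_finish,
                  on_finish=lambda: print("training finished"))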