|
- import time
-
- import numpy as np
-
class Epoch:
    """Records the state and results of one training epoch.

    Partitions the (inputs, labels) pair into mini-batches and tracks the
    loss, gradients, weights, and wall-clock duration of the epoch.
    """

    def __init__(self, epoch, inputs, labels, learning_rate, batch_size):
        """Create an epoch record and split the data into batches.

        Args:
            epoch: Index of this epoch.
            inputs: Sequence of training inputs.
            labels: Sequence of labels, parallel to ``inputs``.
            learning_rate: Learning rate used during this epoch.
            batch_size: Samples per batch; the final batch may be smaller
                when ``len(inputs)`` is not a multiple of ``batch_size``.
        """
        self.epoch = epoch
        self.loss = -1.0  # sentinel: loss not yet computed
        self.duration = 0
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        # NOTE(review): each batch is identified by its starting sample
        # index, not a sequential batch counter — confirm that is intended.
        self.batches = []
        for start in range(0, len(inputs), self.batch_size):
            self.batches.append(
                TrainingBatch(start,
                              inputs[start:start + batch_size],
                              labels[start:start + batch_size]))
        self.layer_dl_gradients = []
        self.layer_dl_biases = []
        self.layer_weights = []
        self.finished = False
        # Initialized here so that calling finish() before start() fails
        # with a clear TypeError instead of an AttributeError.
        self.start_time = None
        self.end_time = None

    def start(self):
        """Mark the beginning of the epoch's wall-clock timing."""
        self.start_time = time.time()

    def finish(self, neural_net):
        """Mark the epoch complete and snapshot the trained weights.

        Args:
            neural_net: Network whose ``get_all_weights()`` result is stored
                as ``self.trained_weights``.
        """
        self.finished = True
        self.trained_weights = neural_net.get_all_weights()
        self.end_time = time.time()
        self.duration = self.end_time - self.start_time

    def all_predictions(self):
        """Return every batch's predictions concatenated into one array."""
        return self._concat_from_batches("predictions")

    def all_labels(self):
        """Return every batch's labels concatenated into one array."""
        return self._concat_from_batches("labels")

    def all_inputs(self):
        """Return every batch's inputs concatenated into one array."""
        return self._concat_from_batches("inputs")

    def _concat_from_batches(self, attr):
        """Concatenate the named per-batch attribute across all batches.

        Passes a plain list of per-batch arrays to ``np.concatenate``; the
        previous ``np.concatenate(np.array([...]))`` form built a ragged
        array and failed whenever the last batch was shorter than the rest.
        Returns an empty array when there are no batches instead of raising.
        """
        if not self.batches:
            return np.array([])
        return np.concatenate([np.asarray(getattr(b, attr)) for b in self.batches])

    def print_epoch(self):
        """Print a human-readable summary of the epoch's metrics."""
        print(f"Epoch {self.epoch}:")
        print(f"Loss: {self.loss}")
        print(f"dL / Gradients: {self.layer_dl_gradients}")
        # Bug fix: this line previously printed layer_dl_gradients again.
        print(f"dL / Bias: {self.layer_dl_biases}")
-
class TrainingBatch:
    """A single mini-batch of training data.

    Holds the batch's identifying number, its inputs and labels, and the
    predictions produced for it during training.
    """

    def __init__(self, batch_num, inputs, labels):
        """Store the batch identifier and its data; predictions start empty."""
        self.batch_num = batch_num
        self.labels = labels
        self.inputs = inputs
        # Filled in by the training loop once the batch has been run.
        self.predictions = []
|