Added a generic PyTorch implementation for a multi-layer linear NN.

Daniel Ledda
2020-07-11 12:02:22 +02:00
parent ce4172da5d
commit 88508a0d07
15 changed files with 171 additions and 30 deletions
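The PyTorch implementation itself sits in files not shown in this excerpt of the diff. As a rough sketch only: assuming torch is available and the network mirrors the [28**2, 100, 10] layer list used below, a generic multi-layer linear module might look like this (class and variable names here are illustrative, not taken from the commit):

import torch
import torch.nn as nn
from typing import List

class MultiLayerLinearNN(nn.Module):
    # Hypothetical sketch: builds Linear layers from a list of sizes,
    # e.g. [28**2, 100, 10] -> Linear(784, 100), Sigmoid, Linear(100, 10)
    def __init__(self, layer_sizes: List[int]):
        super().__init__()
        layers = []
        for in_size, out_size in zip(layer_sizes[:-1], layer_sizes[1:]):
            layers.append(nn.Linear(in_size, out_size))
            layers.append(nn.Sigmoid())
        # Drop the activation after the final layer so raw scores come out
        self.model = nn.Sequential(*layers[:-1])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)

# Example: net = MultiLayerLinearNN([28**2, 100, 10]); scores = net(torch.rand(1, 28**2))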


@@ -1,15 +1,11 @@
 import numpy as np
 from recordclass import recordclass
 from typing import NamedTuple, Tuple, List, Callable, Generator
-from import_data import train_x_y, test_x_y
+from import_data import get_training_data_generator, get_test_data_generator
+from custom_types import LossFun
 import sys
-class LossFun(NamedTuple):
-    exec: Callable[[np.array, np.array], float]
-    deriv: Callable[[np.array, np.array], np.array]
 def sum_squares_loss_func(predicted: np.array, gold: np.array) -> float:
     return sum((predicted - gold) ** 2)
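With this change LossFun now comes from custom_types instead of being defined inline. A sketch of how that module and the sum_squares_loss value passed to FFNeuralNetwork below might fit together; the deriv helper and the final wiring line are my reading of the diff, not code shown in it:

import numpy as np
from typing import Callable, NamedTuple

class LossFun(NamedTuple):
    exec: Callable[[np.array, np.array], float]
    deriv: Callable[[np.array, np.array], np.array]

def sum_squares_loss_func(predicted: np.array, gold: np.array) -> float:
    return sum((predicted - gold) ** 2)

# Hypothetical derivative helper: d/d(predicted) of sum((predicted - gold)^2)
def sum_squares_loss_deriv(predicted: np.array, gold: np.array) -> np.array:
    return 2 * (predicted - gold)

sum_squares_loss = LossFun(exec=sum_squares_loss_func, deriv=sum_squares_loss_deriv)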
@@ -106,10 +102,11 @@ class FFNeuralNetwork:
 def train_and_test_neural_network():
     model = FFNeuralNetwork([28**2, 100, 10], sum_squares_loss, 0.0001)
     training_data_gen = train_x_y(1000)
-    test_data = test_x_y(10)()
+    test_data = get_test_data_generator(10)()
     model.train(training_data_gen, 5)
     for test_datum, label in test_data:
-        print(model.feed_forward(test_datum), label)
+        prediction = model.feed_forward(test_datum)
+        print(prediction, label, label == prediction)
     np.set_printoptions(threshold=sys.maxsize)
     print(model.layers[0].weights)
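One caveat on the new test loop: feed_forward presumably returns a 10-element activation vector, so label == prediction is an element-wise comparison rather than a single hit/miss. A sketch of an argmax-based check, assuming label is a one-hot vector (if it is already a class index, drop the second argmax); the accuracy bookkeeping names are illustrative:

import numpy as np

correct, total = 0, 0
for test_datum, label in get_test_data_generator(10)():
    activations = model.feed_forward(test_datum)
    # Reduce the 10-dim output and the one-hot label to class indices before comparing
    predicted_class = int(np.argmax(activations))
    gold_class = int(np.argmax(label))
    correct += predicted_class == gold_class
    total += 1
print(f"accuracy: {correct / total:.2%}")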