Commit a4f10e70 authored by johannes bilk

Merge branch 'master' into 'main'

corrected one error in network test notebook

See merge request !11
parents 57ef2ef6 685561ac
%% Cell type:markdown id: tags:
# Testing the Network
## Importing the Basics
%% Cell type:code id: tags:
``` python
import numpy as np
from matplotlib import pyplot as plt
from nn.module import Sequential
from nn.optim import SGD, SGDMomentum, NesterovMomentum, AdaGrad, AdaDelta, RMSprop, Adam
from nn.scheduler import ExponentialLR, SteppedLR, CyclicalLR
from nn.loss import CrossEntropyLoss, MSELoss, NLLLoss, MAELoss, FocalLoss
from data import Data
from nn.observable import NetworkObservables
from metric import ConfusionMatrix
from utility import ModelIO, Progressbar
from nn import (
    Linear, Dropout, Flatten, Convolution2D, Unsqueeze,
    BatchNorm1D, BatchNorm2D,
    Tanh, SoftMax, Sigmoid, SoftPlus, Relu, Elu, LeakyRelu, SoftSign, Identity
)
```
%% Cell type:markdown id: tags:
## Generating Test Data
A simple data set of 9x9 matrices with ones somewhere down or along the matrix. This is for testing purposes only.
### Generating Data
%% Cell type:code id: tags:
``` python
categories = 4
data = Data(trainAmount=6400, evalAmount=3200, batchSize=128, kFold=4, dataPath='datafiles')
data.generateTestData(categories)
```
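%% Cell type:markdown id: tags:
As a rough illustration of what one of these samples might look like (a hypothetical sketch, not the actual `Data.generateTestData` implementation, whose internals aren't shown here), a 9x9 matrix with ones down a random column or along a random row could be built like this:
%% Cell type:code id: tags:
``` python
import numpy as np

rng = np.random.default_rng()

def makeSample(size: int = 9) -> np.ndarray:
    # hypothetical sketch: place a line of ones down a column or along a row
    sample = np.zeros((size, size))
    index = rng.integers(size)
    if rng.random() < 0.5:
        sample[index, :] = 1.0  # along a row
    else:
        sample[:, index] = 1.0  # down a column
    return sample
```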
%% Cell type:code id: tags:
``` python
plt.imshow(data.trainSet.data[0])
```
%% Output
<matplotlib.image.AxesImage at 0x118f56fd0>
%% Cell type:markdown id: tags:
## Creating a Sequential
This contains a list of layers that are worked through one by one. Here I add a learning layer, followed by an activation layer.
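%% Cell type:markdown id: tags:
Conceptually, the forward pass of such a container just hands each layer's output to the next layer in the list. A minimal sketch of the idea (a hypothetical `MiniSequential`, not the actual `nn.module.Sequential` interface) looks like this:
%% Cell type:code id: tags:
``` python
class MiniSequential:
    # minimal sketch of a sequential container, assuming layers are callables
    def __init__(self):
        self.layers = []

    def append(self, layer):
        self.layers.append(layer)

    def __call__(self, x):
        for layer in self.layers:  # work through the layers one by one
            x = layer(x)
        return x
```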
%% Cell type:code id: tags:
``` python
convolution = False
dropout = 0.35
norming = False
numLayers = 5
```
%% Cell type:code id: tags:
``` python
neurons = 81
network = Sequential()
if convolution:
    neurons = 147
    network.append(Unsqueeze((1, 9, 9)))
    network.append(Convolution2D(1, 3))
    network.append(Tanh())
    if norming:
        network.append(BatchNorm2D((3, 7, 7)))
for i in range(numLayers - 1):
    if i == 0:
        network.append(Flatten())
    network.append(Linear(neurons, neurons))
    network.append(Dropout(neurons, dropout))
    network.append(Tanh())
    if norming:
        network.append(BatchNorm1D(neurons))
network.append(Linear(neurons, categories))
network.append(SoftMax())
print(network)
```
%% Output
(0) Flatten
(1) Linear input size: 81 output size: 81
(2) Dropout size: 81 probability: 0.35
(3) Tanh
(4) Linear input size: 81 output size: 81
(5) Dropout size: 81 probability: 0.35
(6) Tanh
(7) Linear input size: 81 output size: 81
(8) Dropout size: 81 probability: 0.35
(9) Tanh
(10) Linear input size: 81 output size: 81
(11) Dropout size: 81 probability: 0.35
(12) Tanh
(13) Linear input size: 81 output size: 4
(14) SoftMax
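%% Cell type:markdown id: tags:
A note on the sizes: with the convolutional stem enabled, `neurons` is set to 147 to match the `BatchNorm2D((3,7,7))` shape. Assuming a 3x3 kernel without padding, the 9x9 input shrinks to 7x7 across 3 feature maps, so the first linear layer sees 3 * 7 * 7 = 147 inputs after flattening.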
%% Cell type:markdown id: tags:
## Loss and Optimizer
Picking the loss function and initializing the optimizer by handing over a list of layers.
%% Cell type:code id: tags:
``` python
loss = 'entropy'
optimizer = 'rmsprop'
learningRate = 0.001
momentum = 0.9
schedulerName = 'expo'
schedulerBool = False
decay = 0.9
stepSize = 10
```
%% Cell type:code id: tags:
``` python
if loss == 'entropy':
    lossFunc = CrossEntropyLoss()
elif loss == 'nllloss':
    lossFunc = NLLLoss()
elif loss == 'focal':
    lossFunc = FocalLoss()

if optimizer == 'sgd':
    optim = SGD(network, learningRate)
elif optimizer == 'momentum':
    optim = SGDMomentum(network, learningRate, momentum)
elif optimizer == 'nesterov':
    optim = NesterovMomentum(network, learningRate, momentum)
elif optimizer == 'adagrad':
    optim = AdaGrad(network, learningRate)
elif optimizer == 'adadelta':
    optim = AdaDelta(network, learningRate)
elif optimizer == 'rmsprop':
    optim = RMSprop(network, learningRate)
elif optimizer == 'adam':
    optim = Adam(network, learningRate)

if schedulerBool:
    if schedulerName == 'expo':
        scheduler = ExponentialLR(optim, decay)
    elif schedulerName == 'stepped':
        scheduler = SteppedLR(optim, decay, stepSize)
    else:
        scheduler = CyclicalLR(optim, 1/5, 15, 5)
```
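%% Cell type:markdown id: tags:
For reference, RMSprop (the optimizer selected above) scales each gradient by a running root mean square of past gradients. A schematic single-parameter update, not the actual `nn.optim.RMSprop` internals, would be:
%% Cell type:code id: tags:
``` python
import numpy as np

def rmspropStep(weight, gradient, cache, learningRate=0.001, rho=0.9, eps=1e-8):
    # keep a decaying average of squared gradients ...
    cache = rho * cache + (1.0 - rho) * gradient ** 2
    # ... and normalise the step by its square root
    weight = weight - learningRate * gradient / (np.sqrt(cache) + eps)
    return weight, cache
```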
%% Cell type:markdown id: tags:
## Running over Data
%% Cell type:code id: tags:
``` python
epochs = 100
```
%% Cell type:code id: tags:
``` python
metrics = NetworkObservables(epochs)
for i in range(epochs):
    # training pass over the k-1 training folds
    data.trainMode()
    network.train()
    length = len(data.trainSet)
    bar = Progressbar(f'epoch {str(i+1).zfill(len(str(epochs)))}/{epochs}', length)
    for item in data.train:
        inputs = item['data']
        labels = item['labels']
        prediction = network(inputs)
        loss = lossFunc(prediction, labels)
        metrics.update('losses', loss, len(inputs) * (data.kFold - 1))
        gradient = lossFunc.backward()
        optim.step(gradient)
        bar.step()
    metrics.update('learningRate', optim.learningRate)
    # validation pass over the held-out fold
    data.evalMode()
    network.eval()
    for item in data.train:
        inputs = item['data']
        labels = item['labels']
        prediction = network(inputs)
        loss = lossFunc(prediction, labels)
        metrics.update('validation', loss, len(inputs))
        accuracy = np.sum(prediction.argmax(1) == labels.argmax(1)) / len(prediction)
        bar.step()
    if schedulerBool:
        scheduler.step()
    metrics.update('accuracy', accuracy)
    metrics.print()
    metrics.step()
    data.fold()  # rotate to the next validation fold
```
%% Output
epoch 001/100 |  | 00%
losses: 0.49729 learningRate: 0.001 validation: 0.09867 accuracy: 0.48438
losses: 0.41132 learningRate: 0.001 validation: 0.0802 accuracy: 0.5
losses: 0.38994 learningRate: 0.001 validation: 0.10748 accuracy: 0.40625
losses: 0.37619 learningRate: 0.001 validation: 0.07496 accuracy: 0.57031
losses: 0.36574 learningRate: 0.001 validation: 0.07514 accuracy: 0.51562
losses: 0.36658 learningRate: 0.001 validation: 0.0731 accuracy: 0.55469
losses: 0.35791 learningRate: 0.001 validation: 0.07275 accuracy: 0.54688
losses: 0.27411 learningRate: 0.001 validation: 0.03907 accuracy: 0.86719
losses: 0.19936 learningRate: 0.001 validation: 0.02833 accuracy: 0.95312
losses: 0.17114 learningRate: 0.001 validation: 0.08936 accuracy: 0.78906
losses: 0.13173 learningRate: 0.001 validation: 0.00382 accuracy: 1.0
losses: 0.09931 learningRate: 0.001 validation: 0.00169 accuracy: 0.99219
losses: 0.08937 learningRate: 0.001 validation: 0.00319 accuracy: 1.0
losses: 0.0929 learningRate: 0.001 validation: 0.00211 accuracy: 1.0
losses: 0.08361 learningRate: 0.001 validation: 0.00102 accuracy: 1.0
losses: 0.07231 learningRate: 0.001 validation: 0.00137 accuracy: 0.99219
losses: 0.06781 learningRate: 0.001 validation: 0.00089 accuracy: 1.0
losses: 0.06132 learningRate: 0.001 validation: 0.00067 accuracy: 0.99219
losses: 0.06359 learningRate: 0.001 validation: 0.00066 accuracy: 1.0
losses: 0.06332 learningRate: 0.001 validation: 0.00142 accuracy: 1.0
losses: 0.05916 learningRate: 0.001 validation: 0.00089 accuracy: 1.0
losses: 0.05291 learningRate: 0.001 validation: 0.00521 accuracy: 1.0
losses: 0.05816 learningRate: 0.001 validation: 0.00108 accuracy: 1.0
losses: 0.05007 learningRate: 0.001 validation: 0.005 accuracy: 0.97656
losses: 0.04786 learningRate: 0.001 validation: 0.00032 accuracy: 1.0
losses: 0.04677 learningRate: 0.001 validation: 0.00044 accuracy: 1.0
losses: 0.04867 learningRate: 0.001 validation: 0.00305 accuracy: 0.99219
losses: 0.05613 learningRate: 0.001 validation: 0.00043 accuracy: 1.0
losses: 0.04576 learningRate: 0.001 validation: 0.00033 accuracy: 1.0
losses: 0.04427 learningRate: 0.001 validation: 0.00144 accuracy: 0.98438
losses: 0.0448 learningRate: 0.001 validation: 0.0015 accuracy: 1.0
losses: 0.04478 learningRate: 0.001 validation: 0.00316 accuracy: 0.99219
losses: 0.03432 learningRate: 0.001 validation: 0.00054 accuracy: 1.0
losses: 0.0372 learningRate: 0.001 validation: 0.10686 accuracy: 0.72656
losses: 0.03999 learningRate: 0.001 validation: 0.01986 accuracy: 0.92188
losses: 0.03515 learningRate: 0.001 validation: 0.13962 accuracy: 0.70312
losses: 0.03808 learningRate: 0.001 validation: 0.00076 accuracy: 0.99219
losses: 0.03753 learningRate: 0.001 validation: 0.00015 accuracy: 1.0
losses: 0.03672 learningRate: 0.001 validation: 0.00081 accuracy: 1.0
losses: 0.03007 learningRate: 0.001 validation: 0.00013 accuracy: 1.0
losses: 0.03121 learningRate: 0.001 validation: 0.00025 accuracy: 1.0
losses: 0.03534 learningRate: 0.001 validation: 0.00065 accuracy: 1.0
losses: 0.02849 learningRate: 0.001 validation: 0.00012 accuracy: 1.0
losses: 0.03147 learningRate: 0.001 validation: 0.00066 accuracy: 1.0
losses: 0.03631 learningRate: 0.001 validation: 0.00025 accuracy: 1.0
losses: 0.02836 learningRate: 0.001 validation: 0.00031 accuracy: 1.0
losses: 0.03343 learningRate: 0.001 validation: 0.0001 accuracy: 1.0
losses: 0.03032 learningRate: 0.001 validation: 0.00087 accuracy: 1.0
losses: 0.03198 learningRate: 0.001 validation: 0.00046 accuracy: 1.0
losses: 0.02753 learningRate: 0.001 validation: 0.00086 accuracy: 1.0
losses: 0.02652 learningRate: 0.001 validation: 0.00037 accuracy: 1.0
losses: 0.02677 learningRate: 0.001 validation: 0.00031 accuracy: 1.0
losses: 0.02311 learningRate: 0.001 validation: 0.01916 accuracy: 0.9375
losses: 0.02575 learningRate: 0.001 validation: 0.00021 accuracy: 1.0
losses: 0.02486 learningRate: 0.001 validation: 0.00073 accuracy: 1.0
losses: 0.02223 learningRate: 0.001 validation: 0.00306 accuracy: 0.99219
losses: 0.02786 learningRate: 0.001 validation: 0.00021 accuracy: 1.0
losses: 0.02345 learningRate: 0.001 validation: 0.00023 accuracy: 1.0
losses: 0.02363 learningRate: 0.001 validation: 0.00086 accuracy: 1.0
losses: 0.02441 learningRate: 0.001 validation: 0.00011 accuracy: 1.0
losses: 0.02292 learningRate: 0.001 validation: 0.00538 accuracy: 0.97656
losses: 0.02007 learningRate: 0.001 validation: 0.0002 accuracy: 1.0
losses: 0.02292 learningRate: 0.001 validation: 0.00031 accuracy: 1.0
losses: 0.022 learningRate: 0.001 validation: 0.00015 accuracy: 1.0
losses: 0.02332 learningRate: 0.001 validation: 0.00018 accuracy: 1.0
losses: 0.02117 learningRate: 0.001 validation: 0.00093 accuracy: 0.99219
losses: 0.02278 learningRate: 0.001 validation: 0.00408 accuracy: 0.99219
losses: 0.01921 learningRate: 0.001 validation: 0.00293 accuracy: 0.98438
losses: 0.0217 learningRate: 0.001 validation: 0.00021 accuracy: 1.0
losses: 0.01917 learningRate: 0.001 validation: 0.00027 accuracy: 1.0
losses: 0.02403 learningRate: 0.001 validation: 0.00034 accuracy: 1.0
losses: 0.01793 learningRate: 0.001 validation: 0.00516 accuracy: 0.97656
losses: 0.0206 learningRate: 0.001 validation: 0.00036 accuracy: 0.99219
losses: 0.02188 learningRate: 0.001 validation: 0.00142 accuracy: 1.0
losses: 0.01368 learningRate: 0.001 validation: 0.00051 accuracy: 0.99219
losses: 0.01971 learningRate: 0.001 validation: 0.00017 accuracy: 1.0
losses: 0.01667 learningRate: 0.001 validation: 0.0001 accuracy: 1.0
losses: 0.01897 learningRate: 0.001 validation: 0.0002 accuracy: 1.0
losses: 0.01696 learningRate: 0.001 validation: 0.00014 accuracy: 1.0
losses: 0.01589 learningRate: 0.001 validation: 0.04701 accuracy: 0.875
losses: 0.01759 learningRate: 0.001 validation: 9e-05 accuracy: 1.0
losses: 0.01656 learningRate: 0.001 validation: 0.05435 accuracy: 0.85156
losses: 0.01878 learningRate: 0.001 validation: 0.00018 accuracy: 1.0
losses: 0.01652 learningRate: 0.001 validation: 0.00012 accuracy: 1.0
losses: 0.01365 learningRate: 0.001 validation: 0.00144 accuracy: 0.99219
losses: 0.01495 learningRate: 0.001 validation: 0.01523 accuracy: 0.96094
losses: 0.01864 learningRate: 0.001 validation: 0.00022 accuracy: 1.0
losses: 0.01784 learningRate: 0.001 validation: 0.00014 accuracy: 1.0
losses: 0.01584 learningRate: 0.001 validation: 0.00097 accuracy: 0.99219
losses: 0.01781 learningRate: 0.001 validation: 0.00013 accuracy: 1.0
losses: 0.0147 learningRate: 0.001 validation: 0.00028 accuracy: 1.0
losses: 0.01458 learningRate: 0.001 validation: 0.0001 accuracy: 1.0
losses: 0.01514 learningRate: 0.001 validation: 0.00011 accuracy: 1.0
losses: 0.01568 learningRate: 0.001 validation: 0.00033 accuracy: 1.0
losses: 0.01703 learningRate: 0.001 validation: 0.00037 accuracy: 0.99219
losses: 0.01226 learningRate: 0.001 validation: 0.00012 accuracy: 1.0
losses: 0.01413 learningRate: 0.001 validation: 0.00012 accuracy: 1.0
losses: 0.01076 learningRate: 0.001 validation: 0.00019 accuracy: 1.0
losses: 0.01424 learningRate: 0.001 validation: 7e-05 accuracy: 1.0
losses: 0.01674 learningRate: 0.001 validation: 0.00083 accuracy: 1.0
%% Cell type:code id: tags:
``` python
fig, ax = plt.subplots()
ax3 = ax.twinx()
lns1 = ax.plot(metrics.losses.values, label='Train Loss')
lns2 = ax.plot(metrics.validation.values, label='Eval Loss')
lns3 = ax.plot(metrics.accuracy.values, label='Accuracy')
lns4 = ax3.plot(metrics.learningRate.values, label='learning rate', color='tab:gray', ls='--')
lns = lns1 + lns2 + lns3 + lns4
labs = [lab.get_label() for lab in lns]
ax.legend(lns, labs)
ax.grid(ls=':')
```
%% Output
[figure: train loss, eval loss, accuracy, and learning rate over epochs]
%% Cell type:code id: tags:
``` python
confusion = ConfusionMatrix(categories)
network.eval()
length = len(data.eval)
bar = Progressbar('evaluation', length)
for item in data.eval:
    inputs = item['data']
    labels = item['labels']
    prediction = network(inputs)
    confusion.update(prediction, labels)
    bar.step()
confusion.percentages()
confusion.calcScores()
```
%% Output
evaluation |⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿| done ✔
%% Cell type:code id: tags:
``` python
print(confusion)
```
%% Output
━━━━━━━━━━━━━━━━━━━━━━━━ evaluation ━━━━━━━━━━━━━━━━━━━━━━━━
————————————————————— confusion matrix —————————————————————
         Class 0   Class 1   Class 2   Class 3
····························································
Class 0     3171         0        29         0
             24%        0%        0%        0%
····························································
Class 1        0      3190         0        10
              0%       24%        0%        0%
····························································
Class 2        0         0      3200         0
              0%        0%       25%        0%
····························································
Class 3        0         0         0      3200
              0%        0%        0%       25%
———————————————————————————————— scores ————————————————————————————————
         accuracy   precision   sensitivity   miss rate
·······································································
Class 0     0.998       1.0          0.991       0.009
Class 1     0.999       1.0          0.997       0.003
Class 2     0.998       0.991        1.0         0.0
Class 3     0.999       0.997        1.0         0.0
·······································································
total       0.998       0.997        0.997       0.003
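%% Cell type:markdown id: tags:
The scores follow directly from the matrix counts: per class, precision is TP/(TP+FP) down the predicted column, sensitivity is TP/(TP+FN) across the true row, and the miss rate is 1 - sensitivity. For Class 0, every one of the 3171 Class 0 predictions is correct, giving a precision of 1.0, while 29 of its 3200 true samples land in Class 2, giving a sensitivity of 3171/3200 ≈ 0.991 and a miss rate of 29/3200 ≈ 0.009.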
%% Cell type:markdown id: tags:
## Saving and Loading a Sequential
Sequentials can be converted to dictionaries and then saved as a JSON file. This allows us to load and re-use them later. JSON is also a plain-text format, which is neat.
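%% Cell type:markdown id: tags:
As a rough sketch of that idea (the actual `ModelIO` format isn't shown here, and the attribute names below are assumptions), a save could boil down to dumping one dictionary per layer:
%% Cell type:code id: tags:
``` python
import json
import numpy as np

def saveSketch(layers, fileName):
    # hypothetical: serialise each layer as a dict of its type and weights
    entries = []
    for layer in layers:
        entry = {'type': type(layer).__name__}
        if hasattr(layer, 'weights'):  # assumed attribute name
            entry['weights'] = np.asarray(layer.weights).tolist()
        entries.append(entry)
    with open(fileName + '.json', 'w') as file:
        json.dump(entries, file)
```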
%% Cell type:code id: tags:
``` python
saver = ModelIO()
saver.save(network, 'test')
newNetwork = saver.load('test')
print(newNetwork)
```
%% Output
(0) Flatten
(1) Linear input size: 81 output size: 81
(2) Dropout size: 81 probability: 0.35
(3) Tanh
(4) Linear input size: 81 output size: 81
(5) Dropout size: 81 probability: 0.35
(6) Tanh
(7) Linear input size: 81 output size: 81
(8) Dropout size: 81 probability: 0.35
(9) Tanh
(10) Linear input size: 81 output size: 81
(11) Dropout size: 81 probability: 0.35
(12) Tanh
(13) Linear input size: 81 output size: 4
(14) SoftMax
%% Cell type:code id: tags:
``` python
newConfusion = ConfusionMatrix(categories)
newNetwork.eval()
length = len(data.eval)
bar = Progressbar('evaluation', length)
for item in data.eval:
    inputs = item['data']
    labels = item['labels']
    prediction = newNetwork(inputs)
    newConfusion.update(prediction, labels)
    bar.step()
newConfusion.percentages()
newConfusion.calcScores()
```
%% Output
evaluation |⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿| done ✔
%% Cell type:code id: tags:
``` python
print(newConfusion)
```
%% Output
━━━━━━━━━━━━━━━━━━━━━━━━ evaluation ━━━━━━━━━━━━━━━━━━━━━━━━
————————————————————— confusion matrix —————————————————————
         Class 0   Class 1   Class 2   Class 3
····························································
Class 0     3171         0        29         0
             24%        0%        0%        0%
····························································
Class 1        0      3190         0        10
              0%       24%        0%        0%
····························································
Class 2        0         0      3200         0
              0%        0%       25%        0%
····························································
Class 3        0         0         0      3200
              0%        0%        0%       25%
———————————————————————————————— scores ————————————————————————————————
         accuracy   precision   sensitivity   miss rate
·······································································
Class 0     0.998       1.0          0.991       0.009
Class 1     0.999       1.0          0.997       0.003
Class 2     0.998       0.991        1.0         0.0
Class 3     0.999       0.997        1.0         0.0
·······································································
total       0.998       0.997        0.997       0.003
%% Cell type:markdown id: tags:
## Comment
The network works in principle, and thanks to NumPy running on OpenBLAS it even utilises multiple cores. I've added Jupyter widgets to set network parameters.