# -*- coding: utf-8 -*-
from typing import List
import math
import random
import pickle


class Tinn:
    def __init__(self, nips: int, nhid: int, nops: int):
        """Build a new Tinn given the number of inputs (nips), the number of
        neurons in the single hidden layer (nhid), and the number of outputs (nops)."""
        self.nips = nips  # number of inputs
        self.nhid = nhid  # number of hidden neurons
        self.nops = nops  # number of outputs
        # Biases: Tinn supports only one hidden layer, so there are two biases.
        self.b = [random.random() - 0.5 for _ in range(2)]
        # Input-to-hidden weights, randomized like the hidden-to-output weights
        # below (zero-initializing them would start every hidden neuron from the
        # same activation).
        self.x1 = [[random.random() - 0.5 for _ in range(nips)] for _ in range(nhid)]
        self.h = [0.0] * nhid  # hidden layer activations
        # Hidden-to-output weights.
        self.x2 = [[random.random() - 0.5 for _ in range(nhid)] for _ in range(nops)]
        self.o = [0.0] * nops  # output layer activations

    def save(self, path: str) -> None:
        """Saves the Tinn to disk."""
        with open(path, 'wb') as f:
            pickle.dump(self, f)


def xtload(path: str) -> Tinn:
    """Loads a Tinn from disk."""
    with open(path, 'rb') as f:
        return pickle.load(f)


def xttrain(t: Tinn, in_: List[float], tg: List[float], rate: float) -> float:
    """Trains a Tinn on one input/target pair with the given learning rate.
    Returns the total error of the network for that pair."""
    fprop(t, in_)
    bprop(t, in_, tg, rate)
    return toterr(tg, t.o)


def xtpredict(t: Tinn, in_: List[float]) -> List[float]:
    """Returns an output prediction given an input."""
    fprop(t, in_)
    return t.o


def err(a: float, b: float) -> float:
    """Error function."""
    return 0.5 * (a - b) ** 2


def pderr(a: float, b: float) -> float:
    """Partial derivative of the error function."""
    return a - b


def toterr(tg: List[float], o: List[float]) -> float:
    """Total error across all outputs."""
    return sum(err(tg[i], o[i]) for i in range(len(o)))
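

# Worked example (illustrative values, not from the original file): for
# tg = [1.0, 0.0] and o = [0.8, 0.2],
# toterr = 0.5 * 0.2**2 + 0.5 * 0.2**2 = 0.04.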


def act(a: float) -> float:
    """Activation function (sigmoid)."""
    return 1 / (1 + math.exp(-a))


def pdact(a: float) -> float:
    """Partial derivative of the activation function, expressed in terms of
    the already-activated value: for a = act(x), d(act)/dx = a * (1 - a)."""
    return a * (1 - a)
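

# A quick numerical sanity check of that identity (a sketch added for
# illustration; not part of the original Tinn port): the central-difference
# slope of act at x should match pdact(act(x)).
def _check_pdact(x: float = 0.7, eps: float = 1e-6) -> None:
    numeric = (act(x + eps) - act(x - eps)) / (2 * eps)
    analytic = pdact(act(x))
    assert abs(numeric - analytic) < 1e-6, (numeric, analytic)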


def bprop(t: Tinn, in_: List[float], tg: List[float], rate: float) -> None:
    """Back propagation."""
    for i in range(t.nhid):
        s = 0.0
        # Accumulate the total error change with respect to hidden neuron i.
        for j in range(t.nops):
            ab = pderr(t.o[j], tg[j]) * pdact(t.o[j])
            s += ab * t.x2[j][i]
            # Correct weights in the hidden-to-output layer.
            t.x2[j][i] -= rate * ab * t.h[i]
        # Correct weights in the input-to-hidden layer.
        for j in range(t.nips):
            t.x1[i][j] -= rate * s * pdact(t.h[i]) * in_[j]


def fprop(t: Tinn, in_: List[float]) -> None:
    """Forward propagation."""
    # Calculate hidden layer neuron values.
    for i in range(t.nhid):
        s = t.b[0]  # start with the hidden-layer bias
        for j in range(t.nips):
            s += in_[j] * t.x1[i][j]
        t.h[i] = act(s)
    # Calculate output layer neuron values.
    for i in range(t.nops):
        s = t.b[1]  # start with the output-layer bias
        for j in range(t.nhid):
            s += t.h[j] * t.x2[i][j]
        t.o[i] = act(s)
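

# Minimal usage sketch (added for illustration, not part of the original
# port): train a 2-4-1 network on XOR, predict, and round-trip the model
# through pickle. The hyperparameters and the 'xor.pkl' path are
# illustrative choices, not values from the original file.
if __name__ == '__main__':
    random.seed(0)
    data = [([0.0, 0.0], [0.0]),
            ([0.0, 1.0], [1.0]),
            ([1.0, 0.0], [1.0]),
            ([1.0, 1.0], [0.0])]
    t = Tinn(nips=2, nhid=4, nops=1)
    for _ in range(10000):
        for in_, tg in data:
            xttrain(t, in_, tg, rate=0.5)
    for in_, _tg in data:
        print(in_, '->', xtpredict(t, in_))
    t.save('xor.pkl')
    loaded = xtload('xor.pkl')
    print('reloaded prediction:', xtpredict(loaded, [1.0, 0.0]))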