-
Notifications
You must be signed in to change notification settings - Fork 31
/
Copy pathtfjs_xor_batching.js
144 lines (118 loc) · 3.34 KB
/
tfjs_xor_batching.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
// --- Hyperparameters ---
const hiddenNumNeurons = 20;   // units in first hidden layer
const hidden2NumNeurons = 5;   // units in second hidden layer
const learningRate = 0.01;     // Adam step size
const num_iterations = 100;    // training iterations (each on one fresh batch)
const batch_size = 20;         // samples per training batch

// --- Trainable parameters: a 2 -> 20 -> 5 -> 1 fully-connected net ---
// Weights start from a random normal; biases start at zero.
const weights = tf.variable(tf.randomNormal([2, hiddenNumNeurons]));
const biases = tf.variable(tf.zeros([hiddenNumNeurons]));
const weights2 = tf.variable(tf.randomNormal([hiddenNumNeurons, hidden2NumNeurons]));
const biases2 = tf.variable(tf.zeros([hidden2NumNeurons]));
const outWeights = tf.variable(tf.randomNormal([hidden2NumNeurons, 1]));
const outBias = tf.variable(tf.zeros([1]));

const optimizer = tf.train.adam(learningRate);

// Small constant added before log() to avoid log(0) in the loss.
const epsilon = tf.scalar(1e-7);
const one = tf.scalar(1);
/**
 * Forward pass: run `input` through the two ReLU hidden layers and the
 * sigmoid output layer.
 *
 * @param {tf.Tensor2D} input - batch of samples, shape [batch, 2]
 * @returns {tf.Tensor1D} per-sample probabilities in (0, 1)
 */
function predict(input) {
  return tf.tidy(() => {
    const layer1 = input.matMul(weights).add(biases).relu();
    const layer2 = layer1.matMul(weights2).add(biases2).relu();
    const probabilities = layer2.matMul(outWeights).add(outBias).sigmoid();
    return probabilities.as1D();
  });
}
/**
 * Binary cross-entropy between predicted probabilities and 0/1 labels.
 * `epsilon` guards the log() against exact 0 or 1 predictions.
 *
 * @param {tf.Tensor1D} prediction - model outputs in (0, 1)
 * @param {tf.Tensor1D} actual - ground-truth labels (0 or 1)
 * @returns {tf.Scalar} mean negative log-likelihood over the batch
 */
function loss(prediction, actual) {
  // Having a good error metric is key for training a machine learning model
  return tf.tidy(() => {
    const positiveTerm = actual.mul(prediction.add(epsilon).log());
    const negativeTerm = one.sub(actual).mul(one.sub(prediction).add(epsilon).log());
    return positiveTerm.add(negativeTerm).mean().neg().asScalar();
  });
}
/**
 * Train the model for `numIterations` steps, each on a fresh random batch,
 * logging the loss every 10 iterations, then invoke `done`.
 *
 * Fixes over the original: the loss scalar returned by minimize() is now
 * disposed every iteration (it was leaked before), the confusing
 * reassignment of `cost` inside the tidy closure is gone, and the
 * misspelled `pretfoss` local is renamed.
 *
 * @param {number} numIterations - number of optimizer steps to run
 * @param {Function} done - callback invoked after the final iteration
 */
async function train(numIterations, done) {
  for (let iter = 0; iter < numIterations; iter++) {
    const [xs, ys] = getNRandomSamples(batch_size);
    // `true` makes minimize() return the loss; tidy() keeps that returned
    // scalar alive while cleaning up every intermediate tensor.
    const cost = tf.tidy(() => {
      return optimizer.minimize(() => {
        const pred = predict(tf.tensor2d(xs));
        return loss(pred, tf.tensor1d(ys));
      }, true);
    });
    if (iter % 10 === 0) {
      const data = await cost.data();
      console.log(`Iteration: ${iter} Loss: ${data}`);
    }
    // Free the loss scalar — previously this leaked one tensor per step.
    cost.dispose();
    // Yield to the browser so the UI stays responsive during training.
    await tf.nextFrame();
  }
  done();
}
/**
 * Evaluate classification accuracy of the current model on a labeled set,
 * logging the count of correct predictions and the accuracy ratio.
 *
 * Fix over the original: dataSync() returns a TypedArray, and the old code
 * passed the whole array to Math.round, relying on single-element
 * array-to-number coercion; we now index element [0] explicitly.
 *
 * @param {number[][]} xs - samples, each a [x0, x1] pair
 * @param {number[]} ys - ground-truth labels (0 or 1)
 */
function test(xs, ys) {
  // tidy() disposes every tensor created while scoring the set.
  tf.tidy(() => {
    let predicted = 0;
    for (let i = 0; i < xs.length; i++) {
      // Threshold the sigmoid output at 0.5 via rounding.
      const predictedY = Math.round(predict(tf.tensor2d(xs[i], [1, 2])).dataSync()[0]);
      if (ys[i] === predictedY) {
        predicted++;
      }
    }
    console.log(`Num correctly predicted: ${predicted} out of ${xs.length}`);
    console.log(`Accuracy: ${predicted/xs.length}`);
  });
}
/**
 * Draw one sample for the XOR-quadrant problem: a point uniform in
 * [-1, 1) x [-1, 1), labeled 0 when both coordinates share a sign
 * (quadrants I and III) and 1 otherwise.
 *
 * @returns {[number[], number]} the point and its label
 */
function getRandomSample() {
  const point = [Math.random() * 2 - 1, Math.random() * 2 - 1];
  const sameSign =
    (point[0] > 0 && point[1] > 0) || (point[0] < 0 && point[1] < 0);
  return [point, sameSign ? 0 : 1];
}
/**
 * Collect `n` independent random samples as parallel arrays.
 *
 * @param {number} n - how many samples to draw
 * @returns {[number[][], number[]]} inputs and their labels
 */
function getNRandomSamples(n) {
  const xs = [];
  const ys = [];
  for (let i = 0; i < n; i++) {
    const [x, y] = getRandomSample();
    xs.push(x);
    ys.push(y);
  }
  return [xs, ys];
}
// Hold out a fixed evaluation set of 100 samples.
const [testX, testY] = getNRandomSamples(100);

// Baseline accuracy with untrained (random) weights.
console.log(`Before training: `);
test(testX, testY);

console.log('=============');
console.log(`Training ${num_iterations} epochs...`);

// Train, then re-evaluate on the same held-out set when done.
train(num_iterations, () => {
  console.log('=============');
  console.log(`After training:`);
  test(testX, testY);
});