| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var CostFunction = require('./interface'); |
| 2 | 1 | var util = require('util'); |
| 3 | ||
/**
The cross-entropy cost function for a single pair of values.
wiki: http://en.wikipedia.org/wiki/Cross_entropy

Construction is delegated to the CostFunction interface.

@class
@implements CostFunction
*/
function CrossEntropy () {
  return CostFunction.apply(this, arguments);
}

util.inherits(CrossEntropy, CostFunction);
| 16 | ||
/**
Cross-entropy cost for one output/target pair:
-t*ln(o) - (1-t)*ln(1-o). A NaN result (e.g. 0 * ln(0) when the
output sits exactly on a boundary) is treated as zero cost.

@function
@param {number} output - the actual output value
@param {number} target - the desired output value
@returns {number} the cost associated with the output and target
*/
CrossEntropy.prototype.fn = function (output, target) {
  var cost = -target * Math.log(output) - (1 - target) * Math.log(1 - output);
  if (isNaN(cost))
    return 0;
  return cost;
};
| 21 | ||
/**
Error delta for a single pair: output minus target.

@function
@param {array} inputs - the array of input values (unused here)
@param {number} output - the actual output value
@param {number} target - the desired output value
@returns {number} the error delta
*/
CrossEntropy.prototype.delta = function (inputs, output, target) {
  var difference = output - target;
  return difference;
};

module.exports = CrossEntropy;
| 27 |
| Line | Hits | Source |
|---|---|---|
/**
Cost functions for neural networks.
Each export implements the CostFunction interface (`fn` / `delta`).

@file
*/
exports.CrossEntropy = require('./crossentropy');
exports.MeanSquaredError = require('./meansquarederror');
exports.LogLikelihood = require('./loglikelihood');
| 9 |
| Line | Hits | Source |
|---|---|---|
/**
A generic interface for cost functions.
Implementations must override `fn` (the cost of one output/target
pair) and `delta` (the error delta used during backpropagation).

@class
@interface
*/
function CostFunction () {}

CostFunction.prototype = {
  /**
  Computes the cost associated with a single output/target pair.
  Must be overridden by implementations.

  @function
  @param {number} output - the actual output value
  @param {number} target - the desired output value
  @returns {number} the cost associated with the output and target
  @throws {Error} always, unless overridden
  */
  fn: function (output, target) {
    throw new Error("Not Implemented");
  },
  /**
  Computes the error delta for a single output/target pair.
  Must be overridden by implementations.

  @function
  @param {array} input - the array of input values
  @param {number} output - the actual output value
  @param {number} target - the desired output value
  @returns {number} the error delta associated with the output and target
  @throws {Error} always, unless overridden
  */
  delta: function (input, output, target) {
    throw new Error("Not Implemented");
  }
};

module.exports = CostFunction;
| 36 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var CostFunction = require('./interface'); |
| 2 | 1 | var util = require('util'); |
| 3 | ||
/**
Negative log-likelihood-style cost function for a single
output/target pair.

@class
@implements CostFunction
*/
function LogLikelihood () {
  // Delegate to the parent constructor, consistent with the other
  // cost functions (CrossEntropy, MeanSquaredError) in this package.
  return CostFunction.apply(this, arguments);
}

util.inherits(LogLikelihood, CostFunction);
| 13 | ||
/**
Cost for one pair: -ln(1 - |output - target|).
Approaches 0 as the output approaches the target and grows without
bound as the absolute difference approaches 1.

@function
@param {number} output - the actual output value
@param {number} target - the desired output value
@returns {number} the cost for this pair
*/
LogLikelihood.prototype.fn = function (output, target) {
  var difference = Math.abs(output - target);
  return -1 * Math.log(1 - difference);
};
| 22 | ||
| 23 | ||
/**
Error delta for a single pair: output minus target.

@function
@param {array} input - the array of input values (unused here)
@param {number} output - the actual output value
@param {number} target - the desired output value
@returns {number} the error delta
*/
LogLikelihood.prototype.delta = function (input, output, target) {
  var difference = output - target;
  return difference;
};

module.exports = LogLikelihood;
| 34 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var math = require('mathjs'); |
| 2 | 1 | var CostFunction = require('./interface'); |
| 3 | 1 | var util = require('util'); |
| 4 | ||
/**
The mean squared error cost function for a single pair of values.
wiki: http://en.wikipedia.org/wiki/Mean_squared_error

Construction is delegated to the CostFunction interface.

@class
@implements CostFunction
*/
function MeanSquaredError () {
  return CostFunction.apply(this, arguments);
}

util.inherits(MeanSquaredError, CostFunction);
| 17 | ||
/**
Squared-error cost for one pair: 0.5 * |output - target|^2.
(mathjs `norm` of a scalar is its absolute value.)

@function
@param {number} output - the actual output value
@param {number} target - the desired output value
@returns {number} the cost for this pair
*/
MeanSquaredError.prototype.fn = function (output, target) {
  var difference = output - target;
  return 0.5 * Math.pow(math.norm(difference), 2);
};
| 21 | ||
/**
Error delta: sums sigmoid'(input_k) * (output - target) over all raw
input values, where sigmoid'(z) = s(z) * (1 - s(z)).
NOTE(review): assumes upstream neurons use the logistic activation —
confirm against the neuron implementations.

@function
@param {array} inputs - raw input values feeding the output neuron
@param {number} output - the actual output value
@param {number} target - the desired output value
@returns {number} the error delta
*/
MeanSquaredError.prototype.delta = function (inputs, output, target) {
  var difference = output - target;
  var total = 0;
  for (var i = 0; i < inputs.length; i++) {
    var sig = 1 / (1 + Math.exp(-inputs[i])); // sigmoid of the raw input
    total += sig * (1 - sig) * difference;    // sigmoid-prime * error
  }
  return total;
};

module.exports = MeanSquaredError;
| 33 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var neurons = require('../neurons'); |
| 2 | 1 | var _ = require('underscore'); |
| 3 | ||
/**
A single layer of neurons. Networks process input through multiple layers.

The first neuron (index 0) is initialized with all-zero weights and a
zero bias; the rest receive random values in [-1, 1] (underscore's
`_.random` yields integers). NOTE(review): the special-cased zero
neuron looks deliberate — confirm its purpose against the training code.

@class
@param {number} size - Number of neurons in the layer.
@param {number} input_size - Length of input arrays.
@param {object} Neuron - (optional) Class used to initialize neurons. Defaults to SigmoidNeuron.
*/
function Layer (size, input_size, Neuron) {
  Neuron = Neuron || neurons.SigmoidNeuron;
  var zero = function () { return 0; };
  var rand = function () { return _.random(-1, 1); };
  this._neurons = _.range(size).map(function (i) {
    var init = (i === 0) ? zero : rand;
    var weights = _.range(input_size).map(init);
    var bias = init();
    return new Neuron(weights, bias);
  });
}
| 26 | ||
/**
Getter / setter for the layer's neurons.

@function
@param {array} neurons - (optional) Replacement list of neurons.
  When omitted, the current list is returned.
*/
Layer.prototype.neurons = function (neurons) {
  if (neurons !== undefined) {
    this._neurons = neurons;
    return;
  }
  return this._neurons;
};
| 39 | ||
/**
Getter / setter for a single neuron by index.

@function
@param {number} i - Index of the neuron to get or set.
@param {object} neuron - (optional) Replacement for the i'th neuron.
  When omitted, the i'th neuron is returned.
*/
Layer.prototype.neuron = function (i, neuron) {
  if (neuron !== undefined) {
    this._neurons[i] = neuron;
    return;
  }
  return this._neurons[i];
};
| 53 | ||
/**
Applies the input to every neuron in the layer and collects the
results, one output value per neuron.

@function
@param {array} input - array of input bits
@returns {array} the layer's output values
*/
Layer.prototype.process = function (input) {
  var results = [];
  this.neurons().forEach(function (neuron) {
    results.push(neuron.process(input));
  });
  return results;
};

module.exports = Layer;
| 69 |
| Line | Hits | Source |
|---|---|---|
/**
Neural network layers.

@file
*/
exports.Layer = require('./basic');
exports.SoftmaxLayer = require('./softmax');
| 3 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var util = require('util'); |
| 2 | 1 | var SoftmaxNeuron = require('../neurons').SoftmaxNeuron; |
| 3 | 1 | var Layer = require('./basic'); |
| 4 | 1 | var _ = require('underscore'); |
| 5 | ||
/**
A Layer built from SoftmaxNeurons. Its `process` normalizes the
neurons' raw outputs so that they sum to 1.

@class
@implements Layer
@param {number} size - Number of neurons in the layer.
@param {number} input_size - Length of input arrays.
*/
function SoftmaxLayer (size, input_size) {
  return Layer.call(this, size, input_size, SoftmaxNeuron);
}

util.inherits(SoftmaxLayer, Layer);
| 17 | ||
| 18 | ||
/**
Runs the input through each neuron (passing the neuron's index as a
second argument), then divides every raw result by their sum so the
outputs form a distribution summing to 1.

@function
@param {array} inputs - an array of input bits
@returns {array} normalized output values
*/
SoftmaxLayer.prototype.process = function (inputs) {
  var raw = this.neurons().map(function (neuron, j) {
    return neuron.process(inputs, j);
  });
  var total = raw.reduce(function (a, b) { return a + b; });
  var normalized = raw.map(function (value) {
    return value / total;
  });
  return normalized;
};

module.exports = SoftmaxLayer;
| 35 |
| Line | Hits | Source |
|---|---|---|
/**
Neural network implementations.

@file
*/
exports.Network = require('./interface');
exports.StochasticNetwork = require('./stochastic');
| 3 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var _ = require('underscore'); |
| 2 | 1 | var neurons = require('../neurons'); |
| 3 | 1 | var costfunctions = require('../costfunctions'); |
| 4 | 1 | var layers = require('../layers'); |
| 5 | ||
/**
Generic neural network. Subclasses must implement a `_train` method
to adjust weights and biases based on training data and a learning rate.

@class
@interface
@param {array} sizes - list of sizes for each layer, ex: [2, 3, 1] -> 2-neuron layer, 3-neuron layer, 1-neuron layer.
@param {object} layer - (optional) layer class to use. Defaults to the basic Layer.
@param {object} cost - (optional) cost function class to use. Defaults to MeanSquaredError.
*/
function Network (sizes, layer, cost) {
  var self = this;
  this._layer = layer || layers.Layer;
  if (cost)
    this._cost = new cost();
  else
    this._cost = new costfunctions.MeanSquaredError();
  this._layers = sizes.map(function (size, i) {
    // The first layer's input size equals its own size; every later
    // layer takes the previous layer's size as its input size.
    var input_size = sizes[i-1] || size;
    return new self._layer(size, input_size);
  });
}
| 24 | ||
/**
Getter / setter for the network's neural layers.

@function
@param {array} layers - (optional) Replacement list of layers.
  When omitted, the current list is returned.
*/
Network.prototype.layers = function (layers) {
  if (layers !== undefined) {
    this._layers = layers;
    return;
  }
  return this._layers;
};
| 37 | ||
/**
Getter / setter for a single layer by index.

@function
@param {number} i - Index of the layer to get or set.
@param {object} layer - (optional) Replacement for the i'th layer.
  When omitted, the i'th layer is returned.
*/
Network.prototype.layer = function (i, layer) {
  if (layer !== undefined) {
    this._layers[i] = layer;
    return;
  }
  return this._layers[i];
};
| 51 | ||
/**
Applies an array of input bits to each layer, feeding each layer's
output into the next, and returns the final layer's output.

@function
@param {array} input - array of input bits.
@returns {array} array of output bits
*/
Network.prototype.process = function (input) {
  var current = input;
  this.layers().forEach(function (layer) {
    current = layer.process(current);
  });
  return current;
};
| 66 | ||
/**
Calculates the network's output at each layer
for the given array of input values.

@function
@param {array} input - array of input bits
@returns {array} array of arrays of output bits for each layer
*/
Network.prototype._feedforward = function (input) {
  var outputs = [];
  var layers = this.layers();
  for (var i = 0; i < layers.length; i++) {
    // `var` was missing here, so `output` leaked as an implicit
    // global (a ReferenceError in strict mode).
    var output = layers[i].process(input);
    outputs.push(output);
    input = output;
  }
  return outputs;
};
| 85 | ||
/**
Given a learning rate, a matrix of outputs, and a matrix of deltas,
adjusts weights and biases for every neuron in every layer of the
network (the input layer is skipped).

@function
@param {number} learning_rate - A small, positive number.
@param {array} outputs - [i][j] matrix of outputs for every jth neuron of every ith layer
@param {array} deltas - [i][j] matrix of deltas for every jth neuron of every ith layer, excluding the input layer (hence the i-1 offset)
*/
Network.prototype._adjust_network = function (learning_rate, outputs, deltas) {
  var num_layers = this.layers().length;
  for (var i = 1; i < num_layers; i++) {
    // input size for the current layer === # of neurons in previous layer
    var layer = this.layer(i);
    var input_size = this.layer(i-1).neurons().length;
    var layer_size = layer.neurons().length;
    for (var j = 0; j < layer_size; j++) {
      var neuron = layer.neuron(j);
      var delta = deltas[i-1][j];
      for (var k = 0; k < input_size; k++) {
        // The gradient for weight k scales with the activation of the
        // k'th neuron in the PREVIOUS layer. (Bug fix: the original
        // used this neuron's own output, which applied one identical
        // change to every weight of the neuron.)
        var change = learning_rate * delta * outputs[i-1][k];
        neuron.weight(k, neuron.weight(k) + change);
      }
      neuron.bias(neuron.bias() + (learning_rate * delta));
    }
  }
};
| 116 | ||
/**
Given the network input, outputs for each layer, and the desired
network output, computes per-neuron errors and deltas by
backpropagating from the output layer toward the input layer.

Sign convention: errors accumulate with `-=`, so the stored error for
the output layer is (target - output) when the cost delta is
(output - target).

@function
@param {array} input - array of input bits fed to the network
@param {array} outputs - [i][j] matrix of output bits for every jth neuron of every ith layer
@param {array} target - array of bits representing desired network output
@returns {array} [errors, deltas] - each an [i][j] matrix for every jth neuron of every ith layer, excluding the input layer
*/
Network.prototype._calculate_deltas = function (input, outputs, target) {
  var self = this;
  // 1. output errors for the output layer and
  // 2. backpropogate the errors through prior layers
  var errors = [];
  var deltas = [];
  // index of the last (output) layer, not the layer count
  var num_layers = this.layers().length - 1;
  for (var i = num_layers; i > 0; i--) {
    var layer = this.layer(i);
    var layer_size = layer.neurons().length;
    // add a new layer to errors and deltas for every layer we deal with;
    // unshifting keeps index 0 pointing at the layer currently processed
    errors.unshift([]);
    deltas.unshift([]);
    // calculate errors for each neuron in each layer
    for (var j = 0; j < layer_size; j++) {
      var neuron = layer.neuron(j);
      var output = outputs[i][j];
      var error = 0;
      if (i === num_layers) {
        // output layer
        error -= self._cost.delta(input, output, target[j]);
      } else {
        // hidden layers: deltas[1] holds the deltas of the layer AFTER
        // this one (it was unshifted to index 0 on the previous pass)
        for (var k = 0; k < deltas[1].length; k++) {
          // compute the delta for each weight of the next layer
          // corresponding to each bit of this layer's output
          error -= self.layer(i+1).neuron(k).weight(j) * deltas[1][k];
        }
      }
      errors[0][j] = error;
      // scale by o * (1 - o), the sigmoid derivative; assumes sigmoid
      // activations — TODO confirm for other neuron types
      deltas[0][j] = error * output * (1 - output);
    }
  }
  return [errors, deltas];
};
| 162 | ||
/**
The dreaded backpropogation algorithm.
First computes every layer's output for the input, then the error
deltas for every non-input layer, and finally adjusts the network's
weights and biases using those deltas and the learning rate.

@function
@param {array} input - array of input bits to the network
@param {array} target - array of desired output bits for the given input
@param {number} learning_rate - rate of learning, supplied by the caller
@returns {array} [outputs, errors] - per-layer outputs and per-neuron errors
*/
Network.prototype._backpropogate = function (input, target, learning_rate) {
  // 1. feedforward
  var outputs = this._feedforward(input);
  // 2. calculate deltas
  var calculated = this._calculate_deltas(input, outputs, target);
  var errors = calculated[0];
  var deltas = calculated[1];
  // 3. adjust network
  this._adjust_network(learning_rate, outputs, deltas);
  return [outputs, errors];
};
| 187 | ||
/**
Trains the network against [input, correct_output] pairs.

The options object supports:
* epochs: rounds to train against the data. Defaults to 20,000.
* learning_rate: how quickly the network learns. Defaults to 0.3.
* threshold: stop early once the average error drops below this. Defaults to 0.005.
Note: falsy option values (e.g. 0) fall back to these defaults.

@function
@param {array} training_data - array of [input, correct_output] pairs used to train the network
@param {object} opts - options object.
@returns {array} [i, error] where i is the number of epochs run and error is the final average error
*/
Network.prototype.train = function (training_data, opts) {
  opts = opts || {};
  var self = this;
  var error_threshold = opts.threshold || Math.pow(10, -3) * 5;
  var epochs = opts.epochs || Math.pow(10, 4) * 2;
  var learning_rate = opts.learning_rate || 0.3;
  var error = 1;

  for (var i = 0; i < epochs && error > error_threshold; i++) {
    error = 0;
    training_data.forEach(function (sample) {
      var results = self._backpropogate(sample[0], sample[1], learning_rate);
      var outputs = results[0];
      var network_output = outputs[outputs.length - 1];
      // aggregate errors only from the output layer
      network_output.forEach(function (bit, k) {
        error += self._cost.fn(bit, sample[1][k]);
      });
    });
    error = error / training_data.length;
  }

  return [i, error];
};

module.exports = Network;
| 231 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var _ = require('underscore'); |
| 2 | 1 | var Network = require('./interface'); |
| 3 | 1 | var util = require('util'); |
| 4 | ||
/**
Network that samples training data into mini batches, in order to
learn more quickly with less processing without sacrificing
significant accuracy.

Construction is delegated to the Network interface.

@class
@implements Network
*/
function StochasticNetwork () {
  return Network.apply(this, arguments);
}

util.inherits(StochasticNetwork, Network);
| 19 | ||
/**
Stochastic training function. Each epoch draws one random mini batch
from the training data and backpropagates every sample in it.

@function
@param {array} training_data - [input, output] pairs used to guide learning.
@param {number} epochs - Number of rounds to train. Defaults to 300.
@param {number} learning_rate - Small, positive number (ex: 0.3) indicating the rate of learning. Defaults to 0.3.
@param {number} batch_size - Number of samples drawn per epoch. Defaults to 10.
*/
StochasticNetwork.prototype.train = function (training_data, epochs, learning_rate, batch_size) {
  epochs = epochs || 300;
  learning_rate = learning_rate || 0.3;
  batch_size = batch_size || 10;

  var self = this;
  for (var i = 0; i < epochs; i++) {
    var batch = _.sample(training_data, batch_size);
    batch.forEach(function (pair) {
      self._backpropogate(pair[0], pair[1], learning_rate);
    });
  }
};

module.exports = StochasticNetwork;
| 46 |
| Line | Hits | Source |
|---|---|---|
/**
Neuron implementations.

@file
*/
exports.Perceptron = require('./perceptron');
exports.SigmoidNeuron = require('./sigmoid');
// layers/softmax.js reads require('../neurons').SoftmaxNeuron, which
// was never exported here, leaving SoftmaxLayer's default neuron
// undefined. TODO confirm './softmax' module path.
exports.SoftmaxNeuron = require('./softmax');
| 3 |
| Line | Hits | Source |
|---|---|---|
/**
A generic interface for Neurons, with getters and setters for a
neuron's weights and bias.

Classes inheriting from this interface should implement a `_process`
function that takes an array of bits of size equal to the neuron's
list of weights.

@class
@interface
@param {array} weights - An array of weights to apply to input bits.
@param {number} bias - A number added to the product of input bits and their weights.
*/
function Neuron (weights, bias) {
  this._weights = weights;
  this._bias = bias;
}

Neuron.prototype = {
  /**
  Get or set the entire array of the neuron's weights.
  @function
  @param {array} weights - The new list of weights. If undefined, returns the current list of weights.
  */
  weights: function (weights) {
    if (weights !== undefined) {
      this._weights = weights;
      return;
    }
    return this._weights;
  },
  /**
  Get or set the neuron's weight value for a given input bit.
  @function
  @param {number} i - The index of the weight to get or set.
  @param {number} n - The new value for the specified weight. If undefined, returns the current weight value.
  */
  weight: function (i, n) {
    if (n !== undefined) {
      this._weights[i] = n;
      return;
    }
    return this._weights[i];
  },
  /**
  Get or set the neuron's bias value.
  @function
  @param {number} n - The new bias value. If undefined, returns the current bias value.
  */
  bias: function (n) {
    if (n !== undefined) {
      this._bias = n;
      return;
    }
    return this._bias;
  },
  /**
  Given an array of input bits, return the neuron's output via the
  implementation's `_process`. Throws if the input length does not
  match the weights length, or if `_process` is unimplemented.

  @function
  @param {array} input - An array of input bits.
  */
  process: function (input) {
    if (input.length !== this.weights().length)
      throw new Error("input.length does not match weights.length: " + input.length + ' !== ' + this.weights().length);
    else if (!this._process)
      throw new Error("Not Implemented");
    else
      return this._process.apply(this, arguments);
  }
};
| 72 | ||
| 73 | 1 | module.exports = Neuron; |
| 74 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var util = require('util'); |
| 2 | 1 | var Neuron = require('./interface'); |
| 3 | ||
/**
The Perceptron is a type of neuron that
sums the product of input values and
their respective weights. If that sum,
plus the Perceptron's "bias", is
greater than 0, it outputs 1.
Else, it outputs 0.

Construction is delegated to the Neuron interface.

@class
@implements Neuron
*/
function Perceptron () {
  return Neuron.apply(this, arguments);
}

util.inherits(Perceptron, Neuron);
| 20 | ||
/**
Sums the weighted input bits plus the neuron's bias and thresholds
the result: returns 1 when the sum is greater than 0, and 0
otherwise.

@function
@param {array} input - An array of input bits.
@returns {number} 1 or 0
*/
Perceptron.prototype._process = function (input) {
  var self = this;
  var total = self.bias();
  input.forEach(function (x, i) {
    total += self.weight(i) * x;
  });
  return total > 0 ? 1 : 0;
};

module.exports = Perceptron;
| 47 |
| Line | Hits | Source |
|---|---|---|
| 1 | 1 | var util = require('util'); |
| 2 | 1 | var Neuron = require('./interface'); |
| 3 | ||
/**
The SigmoidNeuron is a neuron that,
given an array of input bits,
takes the product of those bits
and their weights, plus the neuron's "bias",
and squashes that sum through the logistic
function 1 / (1 + e^-z).

Unlike a Perceptron, the SigmoidNeuron
returns values between 0 and 1.

@class
@implements Neuron
*/
function SigmoidNeuron () {
  return Neuron.apply(this, arguments);
}

util.inherits(SigmoidNeuron, Neuron);
| 22 | ||
/**
Computes the logistic activation of the weighted input sum:
1 / (1 + e^-(w . x + bias)).

@function
@param {array} input - An array of input bits.
@returns {number} activation value strictly between 0 and 1
*/
SigmoidNeuron.prototype._process = function (input) {
  var self = this;
  var z = self.bias();
  input.forEach(function (x, i) {
    z += self.weight(i) * x;
  });
  return 1 / (1 + Math.exp(-z));
};

module.exports = SigmoidNeuron;
| 46 |