#include "ArtificialNeuralNet.h"

#include <cmath>   // floor(), exp()
// Returns a random weight in [dMin, dMax], snapped to the grid defined by ANN_PRECISION.
// randWELL() is assumed to yield a 32-bit unsigned value in [0, 4294967295].
double generateWeights(double dMin, double dMax)
{
	return applyWeightPrecision((dMax - dMin) * ((double)Main::Instance().ITool().randWELL() / 4294967295.0) + dMin);
}
double applyWeightPrecision(double dInput)
{
	return floor(dInput / ANN_PRECISION + 0.5) * ANN_PRECISION;
}
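// For illustration (assuming, say, ANN_PRECISION == 0.001): applyWeightPrecision(0.12345)
// rounds to 0.123 and applyWeightPrecision(0.6789) rounds to 0.679.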
// A single neuron: one weight per input, plus a bias weight when ANN_ENABLE_BIAS is defined.
Neuron::Neuron(Sint32 NumInputs)
{
	Tool &mTool = Main::Instance().ITool();
#ifdef ANN_ENABLE_BIAS
	iNumInputs = NumInputs + 1;   // extra slot for the bias weight
#else
	iNumInputs = NumInputs;
#endif
	// generateWeights() is assumed to take its range from default arguments in the header
	for(Sint32 i = 0; i < iNumInputs; ++i)
		vWeight.push_back(generateWeights());
}
// A layer is a collection of identically-sized neurons.
NeuronLayer::NeuronLayer(Sint32 NumNeurons, Sint32 NumInputsPerNeuron)
{
	iNumNeurons = NumNeurons;
	for(Sint32 i = 0; i < NumNeurons; ++i)
		vNeurons.push_back(Neuron(NumInputsPerNeuron));
}
ArtificialNeuralNet::ArtificialNeuralNet()
{
	iActivationFunc = ANN_ACTIVATION_SIGMOID;
	iNumInputs = 0;
	iNumOutputs = 0;
	iNumHiddenLayers = 0;
	iNeuronsPerHiddenLyr = 0;
}
// Builds the layer structure; with no hidden layers the output layer connects straight to the inputs.
void ArtificialNeuralNet::init(Sint32 NumInputs, Sint32 NumOutputs, Sint32 NumHiddenLayers, Sint32 NeuronsPerHiddenLayer)
{
	iNumInputs = NumInputs;
	iNumOutputs = NumOutputs;
	iNumHiddenLayers = NumHiddenLayers;
	iNeuronsPerHiddenLyr = NeuronsPerHiddenLayer;
	if(iNumHiddenLayers > 0)
	{
		// first hidden layer takes the network inputs, later hidden layers take the previous layer
		vNeuronsLayers.push_back(NeuronLayer(iNeuronsPerHiddenLyr, iNumInputs));
		for(Sint32 i = 0; i < iNumHiddenLayers - 1; ++i)
			vNeuronsLayers.push_back(NeuronLayer(iNeuronsPerHiddenLyr, iNeuronsPerHiddenLyr));
		// output layer is fed by the last hidden layer
		vNeuronsLayers.push_back(NeuronLayer(iNumOutputs, iNeuronsPerHiddenLyr));
	}
	else
	{
		vNeuronsLayers.push_back(NeuronLayer(iNumOutputs, iNumInputs));
	}
}
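// For example, init(4, 2, 1, 6) produces two layers: a hidden layer of 6 neurons with
// 4 weights each (5 with ANN_ENABLE_BIAS defined) and an output layer of 2 neurons with
// 6 weights each (7 with the bias), so getNumberOfWeights() would report 36 (44 with bias).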
// Selects the activation function; values at or below ANN_ACTIVATION_LINEAR clamp to linear,
// and anything else unknown falls back to sigmoid.
void ArtificialNeuralNet::setActivationFunction(Sint32 iNewActivation)
{
	if(iNewActivation <= ANN_ACTIVATION_LINEAR) iActivationFunc = ANN_ACTIVATION_LINEAR;
	else if(iNewActivation == ANN_ACTIVATION_STEP) iActivationFunc = ANN_ACTIVATION_STEP;
	else iActivationFunc = ANN_ACTIVATION_SIGMOID;
}
Sint32 ArtificialNeuralNet::getActivationFunction()
{
	return iActivationFunc;
}
// Flattens every weight in the network, layer by layer, into a single vector.
vector<double> ArtificialNeuralNet::getWeights()
{
	vector<double> weights;
	Sint32 i, j, k;
	for(i = 0; i < iNumHiddenLayers + 1; ++i)
		for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
			for(k = 0; k < vNeuronsLayers[i].vNeurons[j].iNumInputs; ++k)
				weights.push_back(vNeuronsLayers[i].vNeurons[j].vWeight[k]);
	return weights;
}
// Overwrites every weight from a flat vector laid out in the same order as getWeights().
// Returns -1 if the vector holds too few weights, 0 on success.
Sint32 ArtificialNeuralNet::setWeights(vector<double>& vW)
{
	Sint32 cWeight = 0, i, j, k;
	if(getNumberOfWeights() > (Sint32)vW.size())
		return -1;
	for(i = 0; i < iNumHiddenLayers + 1; ++i)
		for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
			for(k = 0; k < vNeuronsLayers[i].vNeurons[j].iNumInputs; ++k)
				vNeuronsLayers[i].vNeurons[j].vWeight[k] = vW[cWeight++];
	return 0;
}
// Total number of weights across all layers (bias weights included when enabled).
Sint32 ArtificialNeuralNet::getNumberOfWeights()
{
	Sint32 i, j, weights = 0;
	for(i = 0; i < iNumHiddenLayers + 1; ++i)
		for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
			weights += vNeuronsLayers[i].vNeurons[j].iNumInputs;
	return weights;
}
// Feed-forward pass: propagates the inputs through every layer and returns the
// final layer's outputs; returns an empty vector on an input size mismatch.
vector<double> ArtificialNeuralNet::update(vector<double>& inputs)
{
	vector<double> outputs;
	Sint32 cWeight = 0, i, j, k;
	if((Sint32)inputs.size() != iNumInputs)
		return outputs;
	for(i = 0; i < iNumHiddenLayers + 1; ++i)
	{
		// after the first layer the previous layer's outputs become the inputs
		if(i > 0)
			inputs = outputs;
		outputs.clear();
		for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
		{
			double netinput = 0.0;
			Sint32 NumInputs = vNeuronsLayers[i].vNeurons[j].iNumInputs;
			cWeight = 0;
			// weighted sum of the neuron's inputs; the last weight is the bias when enabled
#ifdef ANN_ENABLE_BIAS
			for(k = 0; k < NumInputs - 1; ++k)
#else
			for(k = 0; k < NumInputs; ++k)
#endif
				netinput += vNeuronsLayers[i].vNeurons[j].vWeight[k] * inputs[cWeight++];
#ifdef ANN_ENABLE_BIAS
			netinput += vNeuronsLayers[i].vNeurons[j].vWeight[NumInputs - 1] * (-1.0);
#endif
			switch(iActivationFunc)
			{
			case ANN_ACTIVATION_LINEAR:  outputs.push_back(linear(netinput, 1.0));  break;
			case ANN_ACTIVATION_STEP:    outputs.push_back(step(netinput, 1.0));    break;
			case ANN_ACTIVATION_SIGMOID: outputs.push_back(sigmoid(netinput, 1.0)); break;
			}
		}
	}
	return outputs;
}
// Trivial accessors for the network's dimensions.
Sint32 ArtificialNeuralNet::getNumOutputs() { return iNumOutputs; }

Sint32 ArtificialNeuralNet::getNumInputs() { return iNumInputs; }

Sint32 ArtificialNeuralNet::getNumHiddenLayers() { return iNumHiddenLayers; }

Sint32 ArtificialNeuralNet::getNumNeuronsPerLayer() { return iNeuronsPerHiddenLyr; }
// Logistic activation: squashes the net input into (0, 1).
double ArtificialNeuralNet::sigmoid(double netinput, double response)
{
	return 1 / (1 + exp(-netinput / response));
}
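// Sanity check on the shape: sigmoid(0, r) is 0.5 for any r > 0; with response = 1.0,
// sigmoid(2, 1.0) is roughly 0.88 and sigmoid(-2, 1.0) roughly 0.12. Larger response
// values flatten the curve.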
double ArtificialNeuralNet::linear(double netinput, double scale)
{
	return netinput * scale;
}
// Step activation: fires 1 when the net input exceeds the threshold, otherwise 0.
double ArtificialNeuralNet::step(double netinput, double threshold)
{
	if(netinput > threshold)
		return 1;
	return 0;
}
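/* Illustrative usage sketch (not part of this file): builds a small net and runs one
   feed-forward pass. It assumes the constants and the Main/Tool singletons used above
   are available; the 4-input / 2-output / 1-hidden-layer sizes are arbitrary examples.

	ArtificialNeuralNet net;
	net.init(4, 2, 1, 6);                 // 4 inputs, 2 outputs, 1 hidden layer of 6 neurons

	vector<double> in(4, 0.5);            // any 4 input values
	vector<double> out = net.update(in);  // out.size() == 2 unless the input size mismatched

	vector<double> w = net.getWeights();  // flat weight vector, e.g. for a genetic algorithm
	net.setWeights(w);                    // returns 0 on success, -1 if w is too short
*/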