Pac-Man Evolution
Loading...
Searching...
No Matches
ArtificialNeuralNet.cpp
/*----------------------------------------------------------------------
Pac-Man Evolution - Roberto Prieto
Copyright (C) 2018-2024 MegaStorm Systems
contact@megastormsystems.com - http://www.megastormsystems.com

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

------------------------------------------------------------------------

Artificial Neural Network class

Based on the amazing docs created by Mat Buckland (http://www.ai-junkie.com/ann/evolved/nnt1.html)

------------------------------------------------------------------------ */
29
#include "ArtificialNeuralNet.h"
#include <math.h>
32
33// Function for generating weights inside the weight range
34double generateWeights(double dMin, double dMax)
35{
36 return applyWeightPrecision((dMax - dMin) * ((double)Main::Instance().ITool().randWELL() / (double)4294967295) + dMin);
37}
38// Function for applyting our precision
39double applyWeightPrecision(double dInput)
40{
41 return floor(dInput / ANN_PRECISION + 0.5) * ANN_PRECISION;
42}
43
44// Neuron constructor
45Neuron::Neuron(Sint32 NumInputs)
46{
47 Sint32 i;
48 Tool &mTool = Main::Instance().ITool();
49
50 #ifdef ANN_ENABLE_BIAS
51 // Additional weight for the bias
52 iNumInputs = NumInputs + 1;
53 #else
54 iNumInputs = NumInputs;
55 #endif
56
57 for(i = 0; i < iNumInputs; ++i)
58 {
59 // Set up the weights with an initial random value between ANN_WEIGHT_MIN and ANN_WEIGHT_MAX
60 vWeight.push_back(generateWeights());
61 }
62}
63
64// NeuronLayer constructor
65NeuronLayer::NeuronLayer(Sint32 NumNeurons,Sint32 NumInputsPerNeuron)
66{
67 Sint32 i;
68 iNumNeurons = NumNeurons;
69
70 for(i = 0; i < NumNeurons; ++i)
71 vNeurons.push_back(Neuron(NumInputsPerNeuron));
72}
73
74// ArtificialNeuralNet constructor
75ArtificialNeuralNet::ArtificialNeuralNet()
76{
77 iActivationFunc = ANN_ACTIVATION_SIGMOID;
78 iNumInputs = 0;
79 iNumOutputs = 0;
80 iNumHiddenLayers = 0;
81 iNeuronsPerHiddenLyr = 0;
82}
83
84// Initialize the NN. It creates random weight from [ANN_WEIGHT_MIN, ANN_WEIGHT_MAX]
85void ArtificialNeuralNet::init(Sint32 NumInputs, Sint32 NumOutputs, Sint32 NumHiddenLayers, Sint32 NeuronsPerHiddenLayer)
86{
87 iNumInputs = NumInputs;
88 iNumOutputs = NumOutputs;
89 iNumHiddenLayers = NumHiddenLayers;
90 iNeuronsPerHiddenLyr = NeuronsPerHiddenLayer;
91
92 // Create the layers
93 if(iNumHiddenLayers > 0)
94 {
95 // First layer (input layer)
96 vNeuronsLayers.push_back(NeuronLayer(iNeuronsPerHiddenLyr, iNumInputs));
97
98 // Hidden layers
99 for(Sint32 i = 0; i < iNumHiddenLayers - 1; ++i)
100 {
101 vNeuronsLayers.push_back(NeuronLayer(iNeuronsPerHiddenLyr,iNeuronsPerHiddenLyr));
102 }
103
104 // Output layer
105 vNeuronsLayers.push_back(NeuronLayer(iNumOutputs, iNeuronsPerHiddenLyr));
106 }
107
108 else
109 {
110 // Only one layer: the input and output layer
111 vNeuronsLayers.push_back(NeuronLayer(iNumOutputs, iNumInputs));
112 }
113}
114
115// Set activation function
116void ArtificialNeuralNet::setActivationFunction(Sint32 iNewActivation)
117{
118 if(iNewActivation <= ANN_ACTIVATION_LINEAR) iActivationFunc = ANN_ACTIVATION_LINEAR;
119 else if(iNewActivation == ANN_ACTIVATION_STEP) iActivationFunc = ANN_ACTIVATION_STEP;
120 else iActivationFunc = ANN_ACTIVATION_SIGMOID;
121}
122
// Get activation function: returns the currently selected identifier
// (ANN_ACTIVATION_LINEAR, ANN_ACTIVATION_STEP or ANN_ACTIVATION_SIGMOID).
Sint32 ArtificialNeuralNet::getActivationFunction()
{
    return iActivationFunc;
}
128
129// Get a vector with all the weights
130vector<double> ArtificialNeuralNet::getWeights()
131{
132 vector<double> weights;
133 Sint32 i, j, k;
134
135 // Loop through each layer
136 for(i = 0; i < iNumHiddenLayers + 1; ++i)
137 {
138 // Neurons
139 for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
140 {
141 // and weights
142 for(k = 0; k < vNeuronsLayers[i].vNeurons[j].iNumInputs; ++k)
143 {
144 weights.push_back(vNeuronsLayers[i].vNeurons[j].vWeight[k]);
145 }
146 }
147 }
148
149 return weights;
150}
151
152// Set all the weights replacing current values using the provided vector<double>
153Sint32 ArtificialNeuralNet::setWeights(vector<double>& vW)
154{
155 Sint32 cWeight = 0, i, j, k;
156
157 // Check we have enough weights
158 if(getNumberOfWeights() > vW.size()) return -1;
159
160 // Loop through each layer
161 for(i = 0; i < iNumHiddenLayers + 1; ++i)
162 {
163 // Neurons
164 for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
165 {
166 // and weights
167 for (k = 0; k < vNeuronsLayers[i].vNeurons[j].iNumInputs; ++k)
168 {
169 vNeuronsLayers[i].vNeurons[j].vWeight[k] = vW[cWeight++];
170 }
171 }
172 }
173
174 return 0;
175}
176
177// Return the number of weights
178Sint32 ArtificialNeuralNet::getNumberOfWeights()
179{
180 Sint32 i, j, weights = 0;
181
182 // Loop through each layer
183 for(i = 0; i < iNumHiddenLayers + 1; ++i)
184 {
185 // Neurons
186 for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
187 {
188 weights += vNeuronsLayers[i].vNeurons[j].iNumInputs;
189 }
190 }
191
192 return weights;
193}
194
// Using the input vector, feed the values forward through every layer and
// return the output vector. Returns an empty vector if inputs.size() does
// not match the configured number of network inputs.
// NOTE(review): 'inputs' is taken by non-const reference and is overwritten
// with intermediate layer outputs whenever there is more than one layer, so
// the caller's vector is clobbered — confirm no caller relies on it after.
vector<double> ArtificialNeuralNet::update(vector<double>& inputs)
{
    vector<double> outputs;
    Sint32 cWeight = 0, i, j, k;

    // We need a match between our inputs and the size of the input vector
    if(inputs.size() != iNumInputs)
    {
        // Return empty vector
        return outputs;
    }

    // Loop through each layer
    for(i = 0; i < iNumHiddenLayers + 1; ++i)
    {
        if(i > 0)
        {
            // From the second layer on, the previous layer's outputs
            // become this layer's inputs
            inputs = outputs;
        }

        outputs.clear();
        cWeight = 0;

        // For each neuron sum the (inputs * corresponding weights).
        for(j = 0; j < vNeuronsLayers[i].iNumNeurons; ++j)
        {
            double netinput = 0;

            Sint32 NumInputs = vNeuronsLayers[i].vNeurons[j].iNumInputs;

            // For each weight (with the bias enabled the last weight is
            // reserved for the bias and handled separately below)
            #ifdef ANN_ENABLE_BIAS
            for(k = 0; k < NumInputs - 1; ++k)
            #else
            for(k = 0; k < NumInputs; ++k)
            #endif
            {
                // Sum the weights x inputs
                netinput += vNeuronsLayers[i].vNeurons[j].vWeight[k] * inputs[cWeight++];
            }

            #ifdef ANN_ENABLE_BIAS
            // Add in the bias (-1.0)
            netinput += vNeuronsLayers[i].vNeurons[j].vWeight[NumInputs - 1] * (-1.0);
            #endif

            // The result is sent to our activation function and stored in the output vector
            switch(iActivationFunc)
            {
                case ANN_ACTIVATION_LINEAR:
                    outputs.push_back(linear(netinput, 1.0));
                    break;
                case ANN_ACTIVATION_STEP:
                    outputs.push_back(step(netinput, 1.0));
                    break;
                case ANN_ACTIVATION_SIGMOID:
                    outputs.push_back(sigmoid(netinput, 1.0));
                    break;
            }
            // Restart the input index for the next neuron of this layer
            cWeight = 0;
        }
    }

    return outputs;
}
261
// Get neural network components

// Number of neurons in the output layer
Sint32 ArtificialNeuralNet::getNumOutputs()
{
    return iNumOutputs;
}
// Number of inputs the network expects in update()
Sint32 ArtificialNeuralNet::getNumInputs()
{
    return iNumInputs;
}
// Number of hidden layers (0 means a single input-to-output layer)
Sint32 ArtificialNeuralNet::getNumHiddenLayers()
{
    return iNumHiddenLayers;
}
// Neurons per hidden layer
Sint32 ArtificialNeuralNet::getNumNeuronsPerLayer()
{
    return iNeuronsPerHiddenLyr;
}
279
280// Activation function: Sigmoid
281double ArtificialNeuralNet::sigmoid(double netinput, double response)
282{
283 return( 1 / ( 1 + exp(-netinput / response)));
284}
285
286// Activation function: Linear
287double ArtificialNeuralNet::linear(double netinput, double scale)
288{
289 return netinput * scale;
290
291}
292
293// Activation function: Step
294double ArtificialNeuralNet::step(double netinput, double threshold)
295{
296 if(netinput > threshold) return 1;
297 else return 0;
298}