From 6c161b905c10ed95702406ea9c89cecda8729d88 Mon Sep 17 00:00:00 2001 From: JulioJerez Date: Mon, 30 Oct 2023 07:19:20 -0700 Subject: [PATCH] print symbolic convolution operation, (wip) --- .../ndSandbox/toolbox/ndTestDeepBrain.cpp | 33 +++--- ...BrainLayerCategoricalSoftmaxActivation.cpp | 1 + .../sdk/dBrain/ndBrainLayerConvolutional.cpp | 106 +++++++++++++++++- .../sdk/dBrain/ndBrainLayerConvolutional.h | 12 +- newton-4.00/sdk/dBrain/ndBrainLayerLinear.cpp | 1 + .../sdk/dBrain/ndBrainLayerReluActivation.cpp | 24 ++-- .../dBrain/ndBrainLayerSigmoidActivation.cpp | 36 +++--- .../dBrain/ndBrainLayerSoftmaxActivation.cpp | 69 ++++++------ .../sdk/dBrain/ndBrainLayerTanhActivation.cpp | 29 +++-- 9 files changed, 215 insertions(+), 96 deletions(-) diff --git a/newton-4.00/applications/ndSandbox/toolbox/ndTestDeepBrain.cpp b/newton-4.00/applications/ndSandbox/toolbox/ndTestDeepBrain.cpp index 51a1a715c7..229d256994 100644 --- a/newton-4.00/applications/ndSandbox/toolbox/ndTestDeepBrain.cpp +++ b/newton-4.00/applications/ndSandbox/toolbox/ndTestDeepBrain.cpp @@ -524,26 +524,29 @@ static void MnistTrainingSet() ndInt32 width = trainingDigits->GetColumns() / height; ndAssert((height * width) == trainingDigits->GetColumns()); + const ndBrainLayerConvolutional* conv; + const ndBrainLayerConvolutionalMaxPooling* pooling; + //layers.PushBack(new ndBrainLayerConvolutional(width, height, 1, 5, 16)); layers.PushBack(new ndBrainLayerConvolutional(width, height, 1, 5, 1)); layers.PushBack(new ndBrainLayerReluActivation(layers[layers.GetCount() - 1]->GetOutputSize())); - const ndBrainLayerConvolutional* const conv0 = (ndBrainLayerConvolutional*)(layers[layers.GetCount() - 2]); - layers.PushBack(new ndBrainLayerConvolutionalMaxPooling(conv0->GetOutputWidth(), conv0->GetOutputHeight(), conv0->GetOutputChannels())); - const ndBrainLayerConvolutionalMaxPooling* const pooling0 = (ndBrainLayerConvolutionalMaxPooling*)(layers[layers.GetCount() - 1]); - - //layers.PushBack(new ndBrainLayerConvolutional(pooling0->GetOutputWidth(), pooling0->GetOutputHeight(), pooling0->GetOutputChannels(), 3, 16)); - layers.PushBack(new ndBrainLayerConvolutional(pooling0->GetOutputWidth(), pooling0->GetOutputHeight(), pooling0->GetOutputChannels(), 3, 2)); - layers.PushBack(new ndBrainLayerReluActivation(layers[layers.GetCount() - 1]->GetOutputSize())); - const ndBrainLayerConvolutional* const conv1 = (ndBrainLayerConvolutional*)(layers[layers.GetCount() - 2]); - layers.PushBack(new ndBrainLayerConvolutionalMaxPooling(conv1->GetOutputWidth(), conv1->GetOutputHeight(), conv1->GetOutputChannels())); - const ndBrainLayerConvolutionalMaxPooling* const pooling1 = (ndBrainLayerConvolutionalMaxPooling*)(layers[layers.GetCount() - 1]); + conv = (ndBrainLayerConvolutional*)(layers[layers.GetCount() - 2]); + layers.PushBack(new ndBrainLayerConvolutionalMaxPooling(conv->GetOutputWidth(), conv->GetOutputHeight(), conv->GetOutputChannels())); + pooling = (ndBrainLayerConvolutionalMaxPooling*)(layers[layers.GetCount() - 1]); - //layers.PushBack(new ndBrainLayerConvolutional(pooling1->GetOutputWidth(), pooling1->GetOutputHeight(), pooling1->GetOutputChannels(), 3, 32)); - layers.PushBack(new ndBrainLayerConvolutional(pooling1->GetOutputWidth(), pooling1->GetOutputHeight(), pooling1->GetOutputChannels(), 3, 3)); + //layers.PushBack(new ndBrainLayerConvolutional(pooling->GetOutputWidth(), pooling->GetOutputHeight(), pooling->GetOutputChannels(), 3, 16)); + layers.PushBack(new ndBrainLayerConvolutional(pooling->GetOutputWidth(), 
pooling->GetOutputHeight(), pooling->GetOutputChannels(), 3, 1));
 	layers.PushBack(new ndBrainLayerReluActivation(layers[layers.GetCount() - 1]->GetOutputSize()));
-	const ndBrainLayerConvolutional* const conv2 = (ndBrainLayerConvolutional*)(layers[layers.GetCount() - 2]);
-	layers.PushBack(new ndBrainLayerConvolutionalMaxPooling(conv2->GetOutputWidth(), conv2->GetOutputHeight(), conv2->GetOutputChannels()));
-	//const ndBrainLayerConvolutionalMaxPooling* const pooling2 = (ndBrainLayerConvolutionalMaxPooling*)(layers[layers.GetCount() - 1]);
+	conv = (ndBrainLayerConvolutional*)(layers[layers.GetCount() - 2]);
+	layers.PushBack(new ndBrainLayerConvolutionalMaxPooling(conv->GetOutputWidth(), conv->GetOutputHeight(), conv->GetOutputChannels()));
+	pooling = (ndBrainLayerConvolutionalMaxPooling*)(layers[layers.GetCount() - 1]);
+
+	////layers.PushBack(new ndBrainLayerConvolutional(pooling->GetOutputWidth(), pooling->GetOutputHeight(), pooling->GetOutputChannels(), 3, 32));
+	//layers.PushBack(new ndBrainLayerConvolutional(pooling->GetOutputWidth(), pooling->GetOutputHeight(), pooling->GetOutputChannels(), 3, 1));
+	//layers.PushBack(new ndBrainLayerReluActivation(layers[layers.GetCount() - 1]->GetOutputSize()));
+	//conv = (ndBrainLayerConvolutional*)(layers[layers.GetCount() - 2]);
+	//layers.PushBack(new ndBrainLayerConvolutionalMaxPooling(conv->GetOutputWidth(), conv->GetOutputHeight(), conv->GetOutputChannels()));
+	//pooling = (ndBrainLayerConvolutionalMaxPooling*)(layers[layers.GetCount() - 1]);
 
 	layers.PushBack(new ndBrainLayerLinear(layers[layers.GetCount() - 1]->GetOutputSize(), trainingLabels->GetColumns()));
 	layers.PushBack(new ndBrainLayerCategoricalSoftmaxActivation(layers[layers.GetCount() - 1]->GetOutputSize()));
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerCategoricalSoftmaxActivation.cpp b/newton-4.00/sdk/dBrain/ndBrainLayerCategoricalSoftmaxActivation.cpp
index f2a943578a..1c96e31f46 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerCategoricalSoftmaxActivation.cpp
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerCategoricalSoftmaxActivation.cpp
@@ -50,6 +50,7 @@ void ndBrainLayerCategoricalSoftmaxActivation::InputDerivative(const ndBrainVect
 	// check that the outputDerivative is a one-hot encoding
 	ndAssert(output.GetCount() == inputDerivative.GetCount());
 	ndAssert(output.GetCount() == outputDerivative.GetCount());
+
 	ndInt32 index = 0;
 	for (ndInt32 i = 0; i < outputDerivative.GetCount(); ++i)
 	{
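For context on the hunk above: ndBrainLayerCategoricalSoftmaxActivation::InputDerivative expects a one-hot outputDerivative because, for softmax probabilities y combined with a cross-entropy loss and a one-hot target t, the gradient with respect to the pre-softmax input collapses to y - t. That is why the loop only needs to locate the hot index instead of forming the full Jacobian. A minimal standalone sketch of the identity (std::vector stands in for ndBrainVector; this is not the library routine itself):

```cpp
#include <vector>
#include <cstddef>

// Combined softmax + cross-entropy gradient with a one-hot target:
// dL/dz_i = y_i - t_i, so no Jacobian is ever materialized.
std::vector<float> CategoricalSoftmaxGradient(
	const std::vector<float>& output,        // softmax probabilities y
	const std::vector<float>& oneHotTarget)  // one-hot target t
{
	std::vector<float> gradient(output.size());
	for (std::size_t i = 0; i < output.size(); ++i)
	{
		gradient[i] = output[i] - oneHotTarget[i]; // y_i - t_i
	}
	return gradient;
}
```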
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.cpp b/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.cpp
index e760542401..d91a25417d 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.cpp
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.cpp
@@ -52,6 +52,10 @@ ndBrainLayerConvolutional::ndBrainLayerConvolutional(ndInt32 inputWidth, ndInt32
 		}
 		offset += m_inputWidth;
 	}
+
+	//if (inputWidth == 5)
+	if (inputWidth == 12)
+		Debug(inputWidth, m_inputHeight, inputDepth, kernelSize, numberOfKernels);
 }
 
 ndBrainLayerConvolutional::ndBrainLayerConvolutional(const ndBrainLayerConvolutional& src)
@@ -144,7 +148,6 @@ void ndBrainLayerConvolutional::InitWeights(ndBrainFloat weighVariance, ndBrainF
 	InitGaussianWeights(weighVariance);
 }
 
-//void ndBrainLayerConvolutional::CopyFrom(const ndBrainLayer& src)
 void ndBrainLayerConvolutional::Set(const ndBrainLayer& src)
 {
 	const ndBrainLayerConvolutional& convSrc = (ndBrainLayerConvolutional&)src;
@@ -288,6 +291,106 @@ void ndBrainLayerConvolutional::PredictionOutputChannel(const ndBrainVector& inp
 	}
 }
 
+void ndBrainLayerConvolutional::Debug(ndInt32 width, ndInt32 height, ndInt32 channels, ndInt32 filterSize, ndInt32 filterCount)
+{
+	// print inputs
+	for (ndInt32 channel = 0; channel < channels; ++channel)
+	{
+		for (ndInt32 y = 0; y < height; ++y)
+		{
+			for (ndInt32 x = 0; x < width; ++x)
+			{
+				ndTrace(("x(%d,%d,%d) ", channel, y, x));
+			}
+			ndTrace(("\n"));
+		}
+		ndTrace(("\n"));
+	}
+
+	// print kernels
+	ndTrace(("\n"));
+	for (ndInt32 filter = 0; filter < filterCount; ++filter)
+	{
+		for (ndInt32 channel = 0; channel < channels; ++channel)
+		{
+			for (ndInt32 y = 0; y < filterSize; ++y)
+			{
+				for (ndInt32 x = 0; x < filterSize; ++x)
+				{
+					ndTrace(("w(%d,%d,%d,%d) ", filter, channel, y, x));
+				}
+				ndTrace(("\n"));
+			}
+			ndTrace(("\n"));
+		}
+		ndTrace(("b(%d)\n", filter));
+		ndTrace(("\n"));
+	}
+
+	// print outputs
+	ndTrace(("\n"));
+	for (ndInt32 filter = 0; filter < filterCount; ++filter)
+	{
+		for (ndInt32 y = 0; y < (height - filterSize + 1); ++y)
+		{
+			for (ndInt32 x = 0; x < (width - filterSize + 1); ++x)
+			{
+				ndTrace(("y(%d,%d,%d) ", filter, y, x));
+			}
+			ndTrace(("\n"));
+		}
+		ndTrace(("\n"));
+		//ndTrace(("b(%d)\n\n", filter));
+	}
+
+	// print convolutions
+	ndTrace(("\n"));
+	for (ndInt32 filter = 0; filter < filterCount; ++filter)
+	{
+		for (ndInt32 y0 = 0; y0 < (height - filterSize + 1); ++y0)
+		{
+			for (ndInt32 x0 = 0; x0 < (width - filterSize + 1); ++x0)
+			{
+				ndTrace(("y(%d,%d,%d)=\n", filter, y0, x0));
+				for (ndInt32 channel = 0; channel < channels; ++channel)
+				{
+					for (ndInt32 y = 0; y < filterSize; ++y)
+					{
+						ndTrace((" "));
+						for (ndInt32 x = 0; x < filterSize; ++x)
+						{
+							ndTrace(("x(%d,%d,%d)*w(%d,%d,%d,%d) + ", channel, y0 + y, x0 + x, filter, channel, y, x));
+						}
+						ndTrace(("\n"));
+					}
+				}
+				ndTrace((" b(%d)\n", filter));
+				ndTrace(("\n"));
+			}
+		}
+		ndTrace(("\n"));
+	}
+
+	// print weight gradients
+	for (ndInt32 filter = 0; filter < filterCount; ++filter)
+	{
+		for (ndInt32 channel = 0; channel < channels; ++channel)
+		{
+			for (ndInt32 y0 = 0; y0 < filterSize; ++y0)
+			{
+				for (ndInt32 x0 = 0; x0 < filterSize; ++x0)
+				{
+					ndTrace(("dL/dw(%d,%d,%d,%d)=\n", filter, channel, y0, x0));
+
+
+					ndTrace(("\n"));
+				}
+			}
+		}
+	}
+}
+
 void ndBrainLayerConvolutional::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
 {
 	//m_weights.Mul(input, output);
@@ -313,6 +416,7 @@ void ndBrainLayerConvolutional::CalculateParamGradients(
 	ndBrainLayerConvolutional* const gradients = (ndBrainLayerConvolutional*)gradientOut;
 
 	ndAssert(gradients->m_bias.GetCount() == m_numberOfKernels);
+	ndAssert(output.GetCount() == outputDerivative.GetCount());
 	//gradients->m_bias.Set(outputDerivative);
 	const ndInt32 size = m_outputWidth * m_outputHeight;
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.h b/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.h
index 1f9fc775e4..1cc702cfca 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.h
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerConvolutional.h
@@ -58,9 +58,15 @@ class ndBrainLayerConvolutional : public ndBrainLayer
 	virtual void Save(const ndBrainSave* const loadSave) const;
 	static ndBrainLayer* Load(const ndBrainLoad* const loadSave);
 
+	//virtual void Clear();
+	//virtual void FlushToZero();
+	//virtual void Scale(ndBrainFloat scale);
+	virtual void Set(const ndBrainLayer& src);
+	//virtual void Add(const ndBrainLayer& src);
+	//virtual void Mul(const ndBrainLayer& src);
+	//virtual void Blend(const ndBrainLayer& src, ndBrainFloat blend);
+	//virtual void ScaleAdd(const ndBrainLayer& src, ndBrainFloat scale);
-	void Set(const ndBrainLayer& src);
-
 	private:
 	void InitGaussianBias(ndBrainFloat variance);
 	void InitGaussianWeights(ndBrainFloat variance);
@@ -68,6 +74,8 @@ class ndBrainLayerConvolutional : public ndBrainLayer
 	ndBrainFloat CrossCorrelation(const ndBrainVector& input, const ndBrainVector& kernels) const;
 	void PredictionOutputChannel(const ndBrainVector& input, const ndBrainVector& kernels, ndBrainFloat bias, ndBrainVector& output) const;
 
+	void Debug(ndInt32 width, ndInt32 height, ndInt32 channels, ndInt32 filterSize, ndInt32 filterCount);
+
 	ndBrainVector m_bias;
 	ndBrainVector m_kernels;
 	ndFixSizeArray m_inputOffsets;
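Two notes on the new Debug method. The `if (inputWidth == 12)` guard in the constructor selects the second convolution layer of the MNIST network above (28x28 input -> 24x24 after the 5x5 convolution -> 12x12 after pooling, assuming the max-pooling layer halves each dimension). The final "print weight gradients" loop is still empty, consistent with the (wip) commit subject; by the chain rule for a valid cross-correlation, each weight gradient is itself a cross-correlation of the input with the output derivative, dL/dw(f,c,y0,x0) = sum over output cells (y,x) of x(c, y+y0, x+x0) * dL/dy(f,y,x), with dL/db(f) = sum over (y,x) of dL/dy(f,y,x). A hypothetical completion in the same symbolic-print style (plain printf stands in for ndTrace; this is a sketch, not the author's pending code):

```cpp
#include <cstdio>

// Symbolic dL/dw print: for each kernel tap (y0, x0), sum the products of the
// shifted input samples with the output derivative over every output cell.
void PrintWeightGradients(int width, int height, int channels, int filterSize, int filterCount)
{
	const int outputHeight = height - filterSize + 1;
	const int outputWidth = width - filterSize + 1;
	for (int filter = 0; filter < filterCount; ++filter)
	{
		for (int channel = 0; channel < channels; ++channel)
		{
			for (int y0 = 0; y0 < filterSize; ++y0)
			{
				for (int x0 = 0; x0 < filterSize; ++x0)
				{
					printf("dL/dw(%d,%d,%d,%d)=\n", filter, channel, y0, x0);
					for (int y = 0; y < outputHeight; ++y)
					{
						printf(" ");
						for (int x = 0; x < outputWidth; ++x)
						{
							// input sample that multiplies this weight in output cell (y, x)
							printf("x(%d,%d,%d)*dL/dy(%d,%d,%d) + ", channel, y + y0, x + x0, filter, y, x);
						}
						printf("\n");
					}
					printf("\n");
				}
			}
		}
	}
}
```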
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerLinear.cpp b/newton-4.00/sdk/dBrain/ndBrainLayerLinear.cpp
index bcced21843..cd6963c258 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerLinear.cpp
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerLinear.cpp
@@ -272,6 +272,7 @@ void ndBrainLayerLinear::CalculateParamGradients(
 	ndAssert(!strcmp(GetLabelId(), gradientOut->GetLabelId()));
 	ndBrainLayerLinear* const gradients = (ndBrainLayerLinear*)gradientOut;
 	ndAssert(gradients->m_bias.GetCount() == outputDerivative.GetCount());
+	ndAssert(output.GetCount() == outputDerivative.GetCount());
 	gradients->m_bias.Set(outputDerivative);
 
 	for (ndInt32 i = outputDerivative.GetCount() - 1; i >= 0; --i)
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerReluActivation.cpp b/newton-4.00/sdk/dBrain/ndBrainLayerReluActivation.cpp
index 2edaabfa92..9a1073ea9e 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerReluActivation.cpp
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerReluActivation.cpp
@@ -43,6 +43,18 @@ const char* ndBrainLayerReluActivation::GetLabelId() const
 	return "ndBrainLayerReluActivation";
 }
 
+ndBrainLayer* ndBrainLayerReluActivation::Load(const ndBrainLoad* const loadSave)
+{
+	char buffer[1024];
+	loadSave->ReadString(buffer);
+
+	loadSave->ReadString(buffer);
+	ndInt32 inputs = loadSave->ReadInt();
+	ndBrainLayerReluActivation* const layer = new ndBrainLayerReluActivation(inputs);
+	loadSave->ReadString(buffer);
+	return layer;
+}
+
 void ndBrainLayerReluActivation::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
 {
 	ndAssert(input.GetCount() == output.GetCount());
@@ -65,15 +77,3 @@ void ndBrainLayerReluActivation::InputDerivative(const ndBrainVector& output, co
 	}
 	inputDerivative.Mul(outputDerivative);
 }
-
-ndBrainLayer* ndBrainLayerReluActivation::Load(const ndBrainLoad* const loadSave)
-{
-	char buffer[1024];
-	loadSave->ReadString(buffer);
-
-	loadSave->ReadString(buffer);
-	ndInt32 inputs = loadSave->ReadInt();
-	ndBrainLayerReluActivation* const layer = new ndBrainLayerReluActivation(inputs);
-	loadSave->ReadString(buffer);
-	return layer;
-}
\ No newline at end of file
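The ReLU InputDerivative left in place above ends with inputDerivative.Mul(outputDerivative): the local derivative is 1 where the activation fired (output > 0) and 0 elsewhere, and the incoming gradient is masked element-wise. A scalar restatement of that rule (std::vector in place of ndBrainVector; a sketch, not the library code):

```cpp
#include <vector>
#include <cstddef>

// ReLU backward rule: since output = max(0, input), output > 0 exactly when
// input > 0, so the cached output alone determines the mask.
void ReluInputDerivative(const std::vector<float>& output,
                         const std::vector<float>& outputDerivative,
                         std::vector<float>& inputDerivative)
{
	for (std::size_t i = 0; i < output.size(); ++i)
	{
		const float step = (output[i] > 0.0f) ? 1.0f : 0.0f; // d relu / d input
		inputDerivative[i] = step * outputDerivative[i];
	}
}
```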
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerSigmoidActivation.cpp b/newton-4.00/sdk/dBrain/ndBrainLayerSigmoidActivation.cpp
index 5e91a7a7ed..8f884f45b9 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerSigmoidActivation.cpp
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerSigmoidActivation.cpp
@@ -42,17 +42,16 @@ const char* ndBrainLayerSigmoidActivation::GetLabelId() const
 	return "ndBrainLayerSigmoidActivation";
 }
 
-void ndBrainLayerSigmoidActivation::InputDerivative(const ndBrainVector& output, const ndBrainVector& outputDerivative, ndBrainVector& inputDerivative) const
+ndBrainLayer* ndBrainLayerSigmoidActivation::Load(const ndBrainLoad* const loadSave)
 {
-	ndAssert(output.GetCount() == outputDerivative.GetCount());
-	ndAssert(output.GetCount() == inputDerivative.GetCount());
-
-	inputDerivative.Set(ndBrainFloat(1.0f));
-	inputDerivative.Sub(output);
-	inputDerivative.Mul(output);
+	char buffer[1024];
+	loadSave->ReadString(buffer);
 
-	inputDerivative.Mul(outputDerivative);
-	inputDerivative.FlushToZero();
+	loadSave->ReadString(buffer);
+	ndInt32 inputs = loadSave->ReadInt();
+	ndBrainLayerSigmoidActivation* const layer = new ndBrainLayerSigmoidActivation(inputs);
+	loadSave->ReadString(buffer);
+	return layer;
 }
 
 void ndBrainLayerSigmoidActivation::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
@@ -82,14 +81,15 @@ void ndBrainLayerSigmoidActivation::MakePrediction(const ndBrainVector& input, n
 	}
 }
 
-ndBrainLayer* ndBrainLayerSigmoidActivation::Load(const ndBrainLoad* const loadSave)
+void ndBrainLayerSigmoidActivation::InputDerivative(const ndBrainVector& output, const ndBrainVector& outputDerivative, ndBrainVector& inputDerivative) const
 {
-	char buffer[1024];
-	loadSave->ReadString(buffer);
+	ndAssert(output.GetCount() == outputDerivative.GetCount());
+	ndAssert(output.GetCount() == inputDerivative.GetCount());
 
-	loadSave->ReadString(buffer);
-	ndInt32 inputs = loadSave->ReadInt();
-	ndBrainLayerSigmoidActivation* const layer = new ndBrainLayerSigmoidActivation(inputs);
-	loadSave->ReadString(buffer);
-	return layer;
-}
\ No newline at end of file
+	inputDerivative.Set(ndBrainFloat(1.0f));
+	inputDerivative.Sub(output);
+	inputDerivative.Mul(output);
+
+	inputDerivative.Mul(outputDerivative);
+	inputDerivative.FlushToZero();
+}
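The relocated sigmoid InputDerivative relies on the identity s'(x) = s(x)(1 - s(x)), so the backward pass can be rebuilt from the cached output alone: the Set(1) / Sub(output) / Mul(output) / Mul(outputDerivative) calls compute output * (1 - output) * outputDerivative. The same computation written out element by element (a sketch with std::vector standing in for ndBrainVector):

```cpp
#include <vector>
#include <cstddef>

// Sigmoid backward rule: s'(x) = s(x) * (1 - s(x)), evaluated from the output.
void SigmoidInputDerivative(const std::vector<float>& output,
                            const std::vector<float>& outputDerivative,
                            std::vector<float>& inputDerivative)
{
	for (std::size_t i = 0; i < output.size(); ++i)
	{
		inputDerivative[i] = output[i] * (1.0f - output[i]) * outputDerivative[i];
	}
}
```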
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerSoftmaxActivation.cpp b/newton-4.00/sdk/dBrain/ndBrainLayerSoftmaxActivation.cpp
index 313d7aff74..18f2d536fe 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerSoftmaxActivation.cpp
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerSoftmaxActivation.cpp
@@ -43,31 +43,16 @@ const char* ndBrainLayerSoftmaxActivation::GetLabelId() const
 	return "ndBrainLayerSoftmaxActivation";
 }
 
-void ndBrainLayerSoftmaxActivation::InputDerivative(const ndBrainVector& output, const ndBrainVector& outputDerivative, ndBrainVector& inputDerivative) const
+ndBrainLayer* ndBrainLayerSoftmaxActivation::Load(const ndBrainLoad* const loadSave)
 {
-	// calculate the output derivative which is a the Jacobian matrix time the input
-	//for (ndInt32 i = 0; i < output.GetCount(); ++i)
-	//{
-	//	ndFloat32 s = output[i];
-	//	ndFloat32 acc = (s * (ndFloat32(1.0f) - s)) * outputDerivative[i];
-	//	for (ndInt32 j = 0; j < output.GetCount(); ++j)
-	//	{
-	//		if (i != j)
-	//		{
-	//			acc -= s * output[j] * outputDerivative[j];
-	//		}
-	//	}
-	//	inputDerivative[i] = ndBrainFloat(acc);
-	//}
-
-	// better way to calculate the output derivative which is a the Jacobian matrix time the input
-	// y = (O * I - O * transp(O)) * x
-	ndBrainFloat s = -outputDerivative.Dot(output);
-	inputDerivative.Set(output);
-	inputDerivative.Scale(s);
-	inputDerivative.MulAdd(output, outputDerivative);
+	char buffer[1024];
+	loadSave->ReadString(buffer);
 
-	inputDerivative.FlushToZero();
+	loadSave->ReadString(buffer);
+	ndInt32 inputs = loadSave->ReadInt();
+	ndBrainLayerSoftmaxActivation* const layer = new ndBrainLayerSoftmaxActivation(inputs);
+	loadSave->ReadString(buffer);
+	return layer;
 }
 
 void ndBrainLayerSoftmaxActivation::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
@@ -76,7 +61,7 @@ void ndBrainLayerSoftmaxActivation::MakePrediction(const ndBrainVector& input, n
 	ndBrainFloat max = ndBrainFloat(1.0e-16f);
 	for (ndInt32 i = input.GetCount() - 1; i >= 0; --i)
 	{
-		max = ndMax (input[i], max);
+		max = ndMax(input[i], max);
 	}
 
 	ndBrainFloat acc = ndBrainFloat(0.0f);
@@ -89,19 +74,37 @@ void ndBrainLayerSoftmaxActivation::MakePrediction(const ndBrainVector& input, n
 		acc += prob;
 	}
 
-	ndAssert(acc > ndBrainFloat (0.0f));
+	ndAssert(acc > ndBrainFloat(0.0f));
 	output.Scale(ndBrainFloat(1.0f) / acc);
 	output.FlushToZero();
 }
 
-ndBrainLayer* ndBrainLayerSoftmaxActivation::Load(const ndBrainLoad* const loadSave)
+void ndBrainLayerSoftmaxActivation::InputDerivative(const ndBrainVector& output, const ndBrainVector& outputDerivative, ndBrainVector& inputDerivative) const
 {
-	char buffer[1024];
-	loadSave->ReadString(buffer);
+	ndAssert(output.GetCount() == outputDerivative.GetCount());
+	ndAssert(output.GetCount() == inputDerivative.GetCount());
 
-	loadSave->ReadString(buffer);
-	ndInt32 inputs = loadSave->ReadInt();
-	ndBrainLayerSoftmaxActivation* const layer = new ndBrainLayerSoftmaxActivation(inputs);
-	loadSave->ReadString(buffer);
-	return layer;
+	// calculate the input derivative, which is the Jacobian matrix times the output derivative
+	//for (ndInt32 i = 0; i < output.GetCount(); ++i)
+	//{
+	//	ndFloat32 s = output[i];
+	//	ndFloat32 acc = (s * (ndFloat32(1.0f) - s)) * outputDerivative[i];
+	//	for (ndInt32 j = 0; j < output.GetCount(); ++j)
+	//	{
+	//		if (i != j)
+	//		{
+	//			acc -= s * output[j] * outputDerivative[j];
+	//		}
+	//	}
+	//	inputDerivative[i] = ndBrainFloat(acc);
+	//}
+
+	// better way to calculate the input derivative, which is the Jacobian matrix times the output derivative
+	// y = (O * I - O * transp(O)) * x
+	ndBrainFloat s = -outputDerivative.Dot(output);
+	inputDerivative.Set(output);
+	inputDerivative.Scale(s);
+	inputDerivative.MulAdd(output, outputDerivative);
+
+	inputDerivative.FlushToZero();
 }
\ No newline at end of file
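The comment "y = (O * I - O * transp(O)) * x" in the relocated softmax InputDerivative is the standard O(n) Jacobian-vector product: the softmax Jacobian is diag(y) - y*y^T, so multiplying it by the incoming derivative g reduces to y * g - (y . g) * y, which is exactly what the Dot / Set / Scale / MulAdd sequence computes without materializing the n-by-n matrix. Written out element by element (a sketch, not the library routine):

```cpp
#include <vector>
#include <cstddef>

// Softmax Jacobian-vector product: (diag(y) - y*y^T) * g = y .* (g - (y . g)).
void SoftmaxInputDerivative(const std::vector<float>& output,           // y
                            const std::vector<float>& outputDerivative, // g
                            std::vector<float>& inputDerivative)
{
	float dot = 0.0f; // y . g
	for (std::size_t i = 0; i < output.size(); ++i)
	{
		dot += output[i] * outputDerivative[i];
	}
	for (std::size_t i = 0; i < output.size(); ++i)
	{
		inputDerivative[i] = output[i] * (outputDerivative[i] - dot);
	}
}
```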
diff --git a/newton-4.00/sdk/dBrain/ndBrainLayerTanhActivation.cpp b/newton-4.00/sdk/dBrain/ndBrainLayerTanhActivation.cpp
index 01dc3f2b31..d0141cfa81 100644
--- a/newton-4.00/sdk/dBrain/ndBrainLayerTanhActivation.cpp
+++ b/newton-4.00/sdk/dBrain/ndBrainLayerTanhActivation.cpp
@@ -49,6 +49,18 @@ const char* ndBrainLayerTanhActivation::GetLabelId() const
 	return "ndBrainLayerTanhActivation";
 }
 
+ndBrainLayer* ndBrainLayerTanhActivation::Load(const ndBrainLoad* const loadSave)
+{
+	char buffer[1024];
+	loadSave->ReadString(buffer);
+
+	loadSave->ReadString(buffer);
+	ndInt32 inputs = loadSave->ReadInt();
+	ndBrainLayerTanhActivation* const layer = new ndBrainLayerTanhActivation(inputs);
+	loadSave->ReadString(buffer);
+	return layer;
+}
+
 void ndBrainLayerTanhActivation::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
 {
 	ndAssert(input.GetCount() == output.GetCount());
@@ -70,19 +82,6 @@ void ndBrainLayerTanhActivation::InputDerivative(const ndBrainVector& output, co
 	inputDerivative.FlushToZero();
 }
 
-ndBrainLayer* ndBrainLayerTanhActivation::Load(const ndBrainLoad* const loadSave)
-{
-	char buffer[1024];
-	loadSave->ReadString(buffer);
-
-	loadSave->ReadString(buffer);
-	ndInt32 inputs = loadSave->ReadInt();
-	ndBrainLayerTanhActivation* const layer = new ndBrainLayerTanhActivation(inputs);
-	loadSave->ReadString(buffer);
-	return layer;
-}
-
-
 ndBrainLayerApproximateTanhActivation::ndBrainLayerApproximateTanhActivation(ndInt32 neurons)
 	:ndBrainLayerTanhActivation(neurons)
 {
@@ -115,12 +114,12 @@ const char* ndBrainLayerApproximateTanhActivation::GetLabelId() const
 	return "ndBrainLayerApproximateTanhActivation";
 }
 
-#if defined (D_SCALAR_VECTOR_CLASS) || (defined (D_NEWTON_USE_DOUBLE) && defined (D_BRAIN_USES_REAL))
+//#if defined (D_SCALAR_VECTOR_CLASS) || (defined (D_NEWTON_USE_DOUBLE) && defined (D_BRAIN_USES_REAL))
+#if defined (D_NEWTON_USE_DOUBLE)
 void ndBrainLayerApproximateTanhActivation::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
 {
 	ndBrainLayerTanhActivation::MakePrediction(input, output);
 }
-
 #else
 void ndBrainLayerApproximateTanhActivation::MakePrediction(const ndBrainVector& input, ndBrainVector& output) const
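The tanh changes follow the same pattern as the other activations: Load is hoisted above MakePrediction, and the approximate variant now falls back to the exact ndBrainLayerTanhActivation::MakePrediction only when D_NEWTON_USE_DOUBLE is defined. Like sigmoid, the tanh backward pass needs nothing but the cached output, since d/dx tanh(x) = 1 - tanh(x)^2; a scalar sketch of that rule (std::vector in place of ndBrainVector; not the library code):

```cpp
#include <vector>
#include <cstddef>

// Tanh backward rule: tanh'(x) = 1 - tanh(x)^2, evaluated from the output.
void TanhInputDerivative(const std::vector<float>& output,
                         const std::vector<float>& outputDerivative,
                         std::vector<float>& inputDerivative)
{
	for (std::size_t i = 0; i < output.size(); ++i)
	{
		inputDerivative[i] = (1.0f - output[i] * output[i]) * outputDerivative[i];
	}
}
```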