diff --git a/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h b/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h
index 5bc872b9cc24c..94757cea819ef 100644
--- a/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h
+++ b/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h
@@ -381,6 +381,35 @@ class TCpu
    ///@}
 
+   //____________________________________________________________________________
+   //
+   // Average Pooling Layer Propagation
+   //____________________________________________________________________________
+   /** @name Forward Propagation in Avg Pooling Layer
+    */
+   ///@{
+
+   /** Downsample the matrix \p B to the matrix \p A, using the average
+    * operation. */
+   static void DownsampleAvg(TCpuMatrix<AFloat> &A, const TCpuMatrix<AFloat> &B, size_t imgHeight,
+                             size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows,
+                             size_t strideCols);
+
+   ///@}
+
+   /** @name Backward Propagation in Avg Pooling Layer
+    */
+   ///@{
+
+   /** Perform the complete backward propagation step in an Avg Pooling Layer. Based on the
+    * filter sizes used for computing the average, it forwards the scaled activation
+    * gradients to the previous layer. */
+   static void AvgPoolLayerBackward(std::vector<TCpuMatrix<AFloat>> &activationGradientsBackward,
+                                    const std::vector<TCpuMatrix<AFloat>> &activationGradients,
+                                    size_t batchSize, size_t depth, size_t nLocalViews,
+                                    size_t fltHeight, size_t fltWidth);
+
+   ///@}
+
    //____________________________________________________________________________
    //
    // Reshape Layer Propagation
diff --git a/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda.h b/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda.h
index 5b7bc22f4ce76..e5af6486b548f 100644
--- a/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda.h
+++ b/tmva/tmva/inc/TMVA/DNN/Architectures/Cuda.h
@@ -373,6 +373,36 @@ class TCuda
    ///@}
 
+   //____________________________________________________________________________
+   //
+   // Average Pooling Layer Propagation
+   //____________________________________________________________________________
+   /** @name Forward Propagation in Avg Pooling Layer
+    */
+   ///@{
+
+   /** Downsample the matrix \p B to the matrix \p A, using the average
+    * operation. */
+   static void DownsampleAvg(TCudaMatrix<AFloat> &A, const TCudaMatrix<AFloat> &B,
+                             const int imgHeight, const int imgWidth, const int fltHeight,
+                             const int fltWidth, const int strideRows, const int strideCols);
+
+   ///@}
+
+   /** @name Backward Propagation in Avg Pooling Layer
+    */
+   ///@{
+
+   /** Perform the complete backward propagation step in an Avg Pooling Layer. Based on the
+    * filter sizes used for computing the average, it forwards the scaled activation
+    * gradients to the previous layer. */
+   static void AvgPoolLayerBackward(std::vector<TCudaMatrix<AFloat>> &activationGradientsBackward,
+                                    const std::vector<TCudaMatrix<AFloat>> &activationGradients,
+                                    size_t batchSize, size_t depth, size_t nLocalViews,
+                                    size_t fltHeight, size_t fltWidth);
+
+   ///@}
+
    //____________________________________________________________________________
    //
    // Reshape Layer Propagation
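Editorial note: the CPU and CUDA declarations share the geometry conventions of the existing max-pooling code. The output matrix `A` holds one column per local view, so `nLocalViews` in `AvgPoolLayerBackward` is the number of pooling windows per image. A minimal sketch of that count for zero padding (`PoolOutputSize` is a hypothetical helper, not part of the patch):

    #include <cstddef>

    // Hypothetical helper: output extent along one pooling dimension, zero padding.
    inline std::size_t PoolOutputSize(std::size_t imgDim, std::size_t fltDim, std::size_t stride)
    {
       return (imgDim - fltDim) / stride + 1;
    }

    // nLocalViews = PoolOutputSize(imgHeight, fltHeight, strideRows) *
    //               PoolOutputSize(imgWidth,  fltWidth,  strideCols)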
diff --git a/tmva/tmva/inc/TMVA/DNN/Architectures/Reference.h b/tmva/tmva/inc/TMVA/DNN/Architectures/Reference.h
index 2c4aa87367c2d..5da2e4c3eea2b 100644
--- a/tmva/tmva/inc/TMVA/DNN/Architectures/Reference.h
+++ b/tmva/tmva/inc/TMVA/DNN/Architectures/Reference.h
@@ -389,6 +389,37 @@ class TReference
                                     size_t nLocalViews);
    ///@}
 
+   //____________________________________________________________________________
+   //
+   // Average Pooling Layer Propagation
+   //____________________________________________________________________________
+   /** @name Forward Propagation in Avg Pooling Layer
+    */
+   ///@{
+
+   /** Downsample the matrix \p B to the matrix \p A, using the average
+    * operation. */
+   static void DownsampleAvg(TMatrixT<AReal> &A, const TMatrixT<AReal> &B, size_t imgHeight,
+                             size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows,
+                             size_t strideCols);
+
+   ///@}
+
+   /** @name Backward Propagation in Avg Pooling Layer
+    */
+   ///@{
+
+   /** Perform the complete backward propagation step in an Avg Pooling Layer. Based on the
+    * filter sizes used for computing the average, it forwards the scaled activation
+    * gradients to the previous layer. */
+   static void AvgPoolLayerBackward(std::vector<TMatrixT<AReal>> &activationGradientsBackward,
+                                    const std::vector<TMatrixT<AReal>> &activationGradients,
+                                    size_t batchSize, size_t depth, size_t nLocalViews,
+                                    size_t fltHeight, size_t fltWidth);
+   ///@}
+
    //____________________________________________________________________________
    //
    // Reshape Layer Propagation
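All three backends implement the same backward rule: every incoming activation gradient is scaled by the inverse of the pooling-window area before being handed to the previous layer. A one-line worked check of that factor (editorial sketch; the values are illustrative, not taken from the patch):

    #include <iostream>

    int main()
    {
       const double grad = 1.0;               // incoming activation gradient (example value)
       const int fltHeight = 2, fltWidth = 2; // 2x2 pooling window
       // Each of the four inputs contributed 1/4 of the average, so each
       // receives grad / (fltHeight * fltWidth) = 0.25 during backprop.
       std::cout << grad / (fltHeight * fltWidth) << std::endl;
    }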
diff --git a/tmva/tmva/inc/TMVA/DNN/CNN/AvgPoolLayer.h b/tmva/tmva/inc/TMVA/DNN/CNN/AvgPoolLayer.h
new file mode 100644
index 0000000000000..15987702ffbfb
--- /dev/null
+++ b/tmva/tmva/inc/TMVA/DNN/CNN/AvgPoolLayer.h
@@ -0,0 +1,201 @@
+// @(#)root/tmva/tmva/dnn:$Id$
+// Author: Vladimir Ilievski
+
+/**********************************************************************************
+ * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
+ * Package: TMVA                                                                  *
+ * Class  : TAvgPoolLayer                                                         *
+ * Web    : http://tmva.sourceforge.net                                           *
+ *                                                                                *
+ * Description:                                                                   *
+ *      Average Pool Deep Neural Network Layer                                    *
+ *                                                                                *
+ * Authors (alphabetical):                                                        *
+ *      Vladimir Ilievski  - CERN, Switzerland                                    *
+ *                                                                                *
+ * Copyright (c) 2005-2015:                                                       *
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
+ *      U. of Bonn, Germany                                                       *
+ *                                                                                *
+ * Redistribution and use in source and binary forms, with or without             *
+ * modification, are permitted according to the terms listed in LICENSE           *
+ * (http://tmva.sourceforge.net/LICENSE)                                          *
+ **********************************************************************************/
+
+#ifndef AVGPOOLLAYER_H_
+#define AVGPOOLLAYER_H_
+
+#include "TMatrix.h"
+
+#include "TMVA/DNN/GeneralLayer.h"
+#include "TMVA/DNN/Functions.h"
+
+#include <iostream>
+
+namespace TMVA {
+namespace DNN {
+namespace CNN {
+
+/** \class TAvgPoolLayer
+
+    Generic Average Pooling Layer class.
+
+    This generic Average Pooling Layer Class represents a pooling layer of
+    a CNN. It inherits all of the properties of the generic virtual base class
+    VGeneralLayer. Unlike the max pooling layer, it keeps no matrix of winning
+    units, since every unit in a pooling window contributes equally to the output.
+
+    The height and width of the weights and biases are set to 0, since this
+    layer does not contain any weights.
+
+ */
+template <typename Architecture_t>
+class TAvgPoolLayer : public VGeneralLayer<Architecture_t> {
+public:
+   using Matrix_t = typename Architecture_t::Matrix_t;
+   using Scalar_t = typename Architecture_t::Scalar_t;
+
+private:
+   size_t fFrameHeight; ///< The height of the frame.
+   size_t fFrameWidth;  ///< The width of the frame.
+
+   size_t fStrideRows; ///< The number of row pixels to slide the filter each step.
+   size_t fStrideCols; ///< The number of column pixels to slide the filter each step.
+
+   size_t fNLocalViewPixels; ///< The number of pixels in one local image view.
+   size_t fNLocalViews;      ///< The number of local views in one image.
+
+   Scalar_t fDropoutProbability; ///< Probability that an input is active.
+
+public:
+   /*! Constructor. */
+   TAvgPoolLayer(size_t BatchSize, size_t InputDepth, size_t InputHeight, size_t InputWidth, size_t Height,
+                 size_t Width, size_t OutputNSlices, size_t OutputNRows, size_t OutputNCols, size_t FrameHeight,
+                 size_t FrameWidth, size_t StrideRows, size_t StrideCols, Scalar_t DropoutProbability);
+
+   /*! Copy the average pooling layer provided as a pointer. */
+   TAvgPoolLayer(TAvgPoolLayer<Architecture_t> *layer);
+
+   /*! Copy constructor. */
+   TAvgPoolLayer(const TAvgPoolLayer &);
+
+   /*! Destructor. */
+   ~TAvgPoolLayer();
+
+   /*! Computes activation of the layer for the given input. The input
+    * must be in 3D tensor form with the different matrices corresponding to
+    * different events in the batch. It spatially downsamples the input
+    * matrices. */
+   void Forward(std::vector<Matrix_t> &input, bool applyDropout = false);
+
+   /*! Scales the incoming activation gradients by the inverse of the
+    * pooling-window size and forwards them to the previous layer. Must
+    * only be called directly after the corresponding call to Forward(...). */
+   void Backward(std::vector<Matrix_t> &gradients_backward, const std::vector<Matrix_t> &activations_backward,
+                 std::vector<Matrix_t> &inp1, std::vector<Matrix_t> &inp2);
+
+   /*! Prints the info about the layer. */
+   void Print() const;
+
+   /*! Getters */
+   size_t GetFrameHeight() const { return fFrameHeight; }
+   size_t GetFrameWidth() const { return fFrameWidth; }
+
+   size_t GetStrideRows() const { return fStrideRows; }
+   size_t GetStrideCols() const { return fStrideCols; }
+
+   size_t GetNLocalViewPixels() const { return fNLocalViewPixels; }
+   size_t GetNLocalViews() const { return fNLocalViews; }
+
+   Scalar_t GetDropoutProbability() const { return fDropoutProbability; }
+};
+
+//______________________________________________________________________________
+template <typename Architecture_t>
+TAvgPoolLayer<Architecture_t>::TAvgPoolLayer(size_t batchSize, size_t inputDepth, size_t inputHeight, size_t inputWidth,
+                                             size_t height, size_t width, size_t outputNSlices, size_t outputNRows,
+                                             size_t outputNCols, size_t frameHeight, size_t frameWidth,
+                                             size_t strideRows, size_t strideCols, Scalar_t dropoutProbability)
+   : VGeneralLayer<Architecture_t>(batchSize, inputDepth, inputHeight, inputWidth, inputDepth, height, width, 0, 0, 0,
+                                   0, 0, 0, outputNSlices, outputNRows, outputNCols, EInitialization::kZero),
+     fFrameHeight(frameHeight), fFrameWidth(frameWidth), fStrideRows(strideRows), fStrideCols(strideCols),
+     fNLocalViewPixels(inputDepth * frameHeight * frameWidth), fNLocalViews(height * width),
+     fDropoutProbability(dropoutProbability)
+{
+}
+
+//______________________________________________________________________________
+template <typename Architecture_t>
+TAvgPoolLayer<Architecture_t>::TAvgPoolLayer(TAvgPoolLayer<Architecture_t> *layer)
+   : VGeneralLayer<Architecture_t>(layer), fFrameHeight(layer->GetFrameHeight()),
+     fFrameWidth(layer->GetFrameWidth()), fStrideRows(layer->GetStrideRows()), fStrideCols(layer->GetStrideCols()),
+     fNLocalViewPixels(layer->GetNLocalViewPixels()), fNLocalViews(layer->GetNLocalViews()),
+     fDropoutProbability(layer->GetDropoutProbability())
+{
+}
+
+//______________________________________________________________________________
+template <typename Architecture_t>
+TAvgPoolLayer<Architecture_t>::TAvgPoolLayer(const TAvgPoolLayer &layer)
+   : VGeneralLayer<Architecture_t>(layer), fFrameHeight(layer.fFrameHeight), fFrameWidth(layer.fFrameWidth),
+     fStrideRows(layer.fStrideRows), fStrideCols(layer.fStrideCols), fNLocalViewPixels(layer.fNLocalViewPixels),
+     fNLocalViews(layer.fNLocalViews), fDropoutProbability(layer.fDropoutProbability)
+{
+}
+
+//______________________________________________________________________________
+template <typename Architecture_t>
+TAvgPoolLayer<Architecture_t>::~TAvgPoolLayer()
+{
+}
+
+//______________________________________________________________________________
+template <typename Architecture_t>
+auto TAvgPoolLayer<Architecture_t>::Forward(std::vector<Matrix_t> &input, bool applyDropout) -> void
+{
+   for (size_t i = 0; i < this->GetBatchSize(); i++) {
+
+      if (applyDropout && (this->GetDropoutProbability() != 1.0)) {
+         Architecture_t::Dropout(input[i], this->GetDropoutProbability());
+      }
+
+      Architecture_t::DownsampleAvg(this->GetOutputAt(i), input[i], this->GetInputHeight(), this->GetInputWidth(),
+                                    this->GetFrameHeight(), this->GetFrameWidth(), this->GetStrideRows(),
+                                    this->GetStrideCols());
+   }
+}
+
+//______________________________________________________________________________
+template <typename Architecture_t>
+auto TAvgPoolLayer<Architecture_t>::Backward(std::vector<Matrix_t> &gradients_backward,
+                                             const std::vector<Matrix_t> & /*activations_backward*/,
+                                             std::vector<Matrix_t> & /*inp1*/, std::vector<Matrix_t> & /*inp2*/) -> void
+{
+   Architecture_t::AvgPoolLayerBackward(gradients_backward, this->GetActivationGradients(), this->GetBatchSize(),
+                                        this->GetDepth(), this->GetNLocalViews(), this->GetFrameHeight(),
+                                        this->GetFrameWidth());
+}
+
+//______________________________________________________________________________
+template <typename Architecture_t>
+auto TAvgPoolLayer<Architecture_t>::Print() const -> void
+{
+   std::cout << "\t\t POOL LAYER: " << std::endl;
+   std::cout << "\t\t\t Width = " << this->GetWidth() << std::endl;
+   std::cout << "\t\t\t Height = " << this->GetHeight() << std::endl;
+   std::cout << "\t\t\t Depth = " << this->GetDepth() << std::endl;
+
+   std::cout << "\t\t\t Frame Width = " << this->GetFrameWidth() << std::endl;
+   std::cout << "\t\t\t Frame Height = " << this->GetFrameHeight() << std::endl;
+}
+
+} // namespace CNN
+} // namespace DNN
+} // namespace TMVA
+
+#endif
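With the header complete, a minimal usage sketch may help. It assumes hypothetical dimensions and the `TReference<double>` backend: a 1x4x4 input is pooled with a 2x2 frame and stride 2 into a 1x2x2 output, which the layer stores as one 1x4 matrix (depth x nLocalViews):

    #include <vector>

    #include "TMVA/DNN/Architectures/Reference.h"
    #include "TMVA/DNN/CNN/AvgPoolLayer.h"

    using Arch = TMVA::DNN::TReference<double>;

    int main()
    {
       TMVA::DNN::CNN::TAvgPoolLayer<Arch> pool(
          /*BatchSize*/ 1, /*InputDepth*/ 1, /*InputHeight*/ 4, /*InputWidth*/ 4,
          /*Height*/ 2, /*Width*/ 2, /*OutputNSlices*/ 1, /*OutputNRows*/ 1,
          /*OutputNCols*/ 4, /*FrameHeight*/ 2, /*FrameWidth*/ 2,
          /*StrideRows*/ 2, /*StrideCols*/ 2, /*DropoutProbability*/ 1.0);

       std::vector<Arch::Matrix_t> input;
       input.emplace_back(1, 16); // one event: depth x (height * width)
       for (int j = 0; j < 16; j++) input[0](0, j) = j;

       pool.Forward(input); // fills pool.GetOutputAt(0) with the 4 window averages
       pool.Print();
    }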
diff --git a/tmva/tmva/src/DNN/Architectures/Cpu/Propagation.cxx b/tmva/tmva/src/DNN/Architectures/Cpu/Propagation.cxx
index 02a35fd089084..99e2d106cb4a4 100644
--- a/tmva/tmva/src/DNN/Architectures/Cpu/Propagation.cxx
+++ b/tmva/tmva/src/DNN/Architectures/Cpu/Propagation.cxx
@@ -570,6 +570,60 @@ void TCpu<AFloat>::MaxPoolLayerBackward(std::vector<TCpuMatrix<AFloat>> &activat
    }
 }
 
+//____________________________________________________________________________
+template <typename AFloat>
+void TCpu<AFloat>::DownsampleAvg(TCpuMatrix<AFloat> &A, const TCpuMatrix<AFloat> &B, size_t imgHeight,
+                                 size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows,
+                                 size_t strideCols)
+{
+   // image boundaries
+   int imgHeightBound = imgHeight - (fltHeight - 1) / 2 - 1;
+   int imgWidthBound = imgWidth - (fltWidth - 1) / 2 - 1;
+   size_t currLocalView = 0;
+
+   // centers
+   for (int i = fltHeight / 2; i <= imgHeightBound; i += strideRows) {
+      for (int j = fltWidth / 2; j <= imgWidthBound; j += strideCols) {
+         // within local views
+         for (int m = 0; m < (Int_t)B.GetNrows(); m++) {
+            AFloat value = 0;
+
+            // sum the local view centered at (i, j) ...
+            for (int k = i - fltHeight / 2; k <= Int_t(i + (fltHeight - 1) / 2); k++) {
+               for (int l = j - fltWidth / 2; l <= Int_t(j + (fltWidth - 1) / 2); l++) {
+                  value += B(m, k * imgWidth + l);
+               }
+            }
+            // ... and store its average
+            A(m, currLocalView) = value / (fltHeight * fltWidth);
+         }
+         currLocalView++;
+      }
+   }
+}
+
+//____________________________________________________________________________
+template <typename AFloat>
+void TCpu<AFloat>::AvgPoolLayerBackward(std::vector<TCpuMatrix<AFloat>> &activationGradientsBackward,
+                                        const std::vector<TCpuMatrix<AFloat>> &activationGradients,
+                                        size_t batchSize, size_t depth, size_t nLocalViews,
+                                        size_t fltHeight, size_t fltWidth)
+{
+   for (size_t i = 0; i < batchSize; i++) {
+      for (size_t j = 0; j < depth; j++) {
+
+         // initialize to zeros
+         for (size_t t = 0; t < (size_t)activationGradientsBackward[i].GetNcols(); t++) {
+            activationGradientsBackward[i](j, t) = 0;
+         }
+
+         // forward each gradient, scaled by the inverse of the window size
+         for (size_t k = 0; k < nLocalViews; k++) {
+            AFloat grad = activationGradients[i](j, k);
+            activationGradientsBackward[i](j, k) += grad / (fltHeight * fltWidth);
+         }
+      }
+   }
+}
+
 //____________________________________________________________________________
 template <typename AFloat>
 void TCpu<AFloat>::Reshape(TCpuMatrix<AFloat> &A, const TCpuMatrix<AFloat> &B)
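Both implementations index pooling windows by their centers rather than their top-left corners. For the Test 1 geometry used below (imgHeight = 4, fltHeight = 2, strideRows = 2), the row loop visits centers i = 1 and i = 3, covering rows 0..1 and 2..3. A standalone check of that indexing (editorial sketch, not part of the patch):

    #include <cstdio>

    int main()
    {
       const int imgHeight = 4, fltHeight = 2, strideRows = 2;
       const int bound = imgHeight - (fltHeight - 1) / 2 - 1; // 3
       for (int i = fltHeight / 2; i <= bound; i += strideRows)
          std::printf("center %d -> rows %d..%d\n", i, i - fltHeight / 2, i + (fltHeight - 1) / 2);
       // prints: center 1 -> rows 0..1, then center 3 -> rows 2..3
    }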
diff --git a/tmva/tmva/src/DNN/Architectures/Reference/Propagation.cxx b/tmva/tmva/src/DNN/Architectures/Reference/Propagation.cxx
index add2ec67d3ebc..9d191510126d0 100644
--- a/tmva/tmva/src/DNN/Architectures/Reference/Propagation.cxx
+++ b/tmva/tmva/src/DNN/Architectures/Reference/Propagation.cxx
@@ -381,6 +381,60 @@ void TReference<AReal>::MaxPoolLayerBackward(std::vector<TMatrixT<AReal>> &activ
    }
 }
 
+//______________________________________________________________________________
+template <typename AReal>
+void TReference<AReal>::DownsampleAvg(TMatrixT<AReal> &A, const TMatrixT<AReal> &B, size_t imgHeight,
+                                      size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows,
+                                      size_t strideCols)
+{
+   // image boundaries
+   int imgHeightBound = imgHeight - (fltHeight - 1) / 2 - 1;
+   int imgWidthBound = imgWidth - (fltWidth - 1) / 2 - 1;
+   size_t currLocalView = 0;
+
+   // centers
+   for (int i = fltHeight / 2; i <= imgHeightBound; i += strideRows) {
+      for (int j = fltWidth / 2; j <= imgWidthBound; j += strideCols) {
+         // within local views
+         for (int m = 0; m < B.GetNrows(); m++) {
+            AReal value = 0;
+
+            for (int k = i - Int_t(fltHeight) / 2; k <= i + (Int_t(fltHeight) - 1) / 2; k++) {
+               for (int l = j - Int_t(fltWidth) / 2; l <= j + (Int_t(fltWidth) - 1) / 2; l++) {
+                  value += B(m, k * imgWidth + l);
+               }
+            }
+            // average over the full window (fltHeight * fltWidth entries)
+            A(m, currLocalView) = value / (fltHeight * fltWidth);
+         }
+         currLocalView++;
+      }
+   }
+}
+
+//______________________________________________________________________________
+template <typename AReal>
+void TReference<AReal>::AvgPoolLayerBackward(std::vector<TMatrixT<AReal>> &activationGradientsBackward,
+                                             const std::vector<TMatrixT<AReal>> &activationGradients,
+                                             size_t batchSize, size_t depth, size_t nLocalViews,
+                                             size_t fltHeight, size_t fltWidth)
+{
+   for (size_t i = 0; i < batchSize; i++) {
+      for (size_t j = 0; j < depth; j++) {
+
+         // initialize to zeros
+         for (size_t t = 0; t < (size_t)activationGradientsBackward[i].GetNcols(); t++) {
+            activationGradientsBackward[i](j, t) = 0;
+         }
+
+         // forward each gradient, scaled by the inverse of the window size
+         for (size_t k = 0; k < nLocalViews; k++) {
+            AReal grad = activationGradients[i](j, k);
+            activationGradientsBackward[i](j, k) = grad / (fltHeight * fltWidth);
+         }
+      }
+   }
+}
+
 //______________________________________________________________________________
 template <typename AReal>
 void TReference<AReal>::Reshape(TMatrixT<AReal> &A, const TMatrixT<AReal> &B)
diff --git a/tmva/tmva/test/DNN/CNN/TestConvNet.h b/tmva/tmva/test/DNN/CNN/TestConvNet.h
index 07f064489615a..963cbb9dbe054 100644
--- a/tmva/tmva/test/DNN/CNN/TestConvNet.h
+++ b/tmva/tmva/test/DNN/CNN/TestConvNet.h
@@ -133,6 +133,34 @@ auto testDownsample(const typename Architecture::Matrix_t &A, const typename Arc
    return true;
 }
 
+/** Average-downsample the matrix A and check whether the result
+ *  is equal to B, within a small tolerance. */
+//______________________________________________________________________________
+template <typename Architecture>
+auto testDownsampleAvg(const typename Architecture::Matrix_t &A, const typename Architecture::Matrix_t &B,
+                       size_t imgHeight, size_t imgWidth, size_t fltHeight, size_t fltWidth, size_t strideRows,
+                       size_t strideCols) -> bool
+{
+   size_t m1, n1;
+   m1 = B.GetNrows();
+   n1 = B.GetNcols();
+
+   typename Architecture::Matrix_t ADown(m1, n1);
+
+   Architecture::DownsampleAvg(ADown, A, imgHeight, imgWidth, fltHeight, fltWidth, strideRows, strideCols);
+
+   for (size_t i = 0; i < m1; i++) {
+      for (size_t j = 0; j < n1; j++) {
+         // the expected values may be rounded (e.g. to three decimals),
+         // so compare with a tolerance rather than exact equality
+         double diff = ADown(i, j) - B(i, j);
+         if (diff < -1e-3 || diff > 1e-3) {
+            return false;
+         }
+      }
+   }
+
+   return true;
+}
+
 /** Flatten the 3D tensor A using the Flatten function and compare it to
  * the result in the flat matrix B. */
 //______________________________________________________________________________
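One expected entry of the new test can be verified by hand: the first 2x2 window of imgTest1's first slice covers {166, 212, 119, 109}, whose sum is 606, and 606 / 4 = 151.5, which is exactly answerTest1[0][0]. The same arithmetic as a tiny program (editorial sketch, not part of the patch):

    #include <iostream>

    int main()
    {
       // First 2x2 window of imgTest1, slice 0 (values copied from Test 1 below).
       const double window[4] = {166, 212, 119, 109};
       double sum = 0;
       for (double v : window) sum += v;
       std::cout << sum / 4 << std::endl; // 151.5 == answerTest1[0][0]
    }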
diff --git a/tmva/tmva/test/DNN/CNN/TestDownsampleAvg.cxx b/tmva/tmva/test/DNN/CNN/TestDownsampleAvg.cxx
new file mode 100644
index 0000000000000..848b1adc229b5
--- /dev/null
+++ b/tmva/tmva/test/DNN/CNN/TestDownsampleAvg.cxx
@@ -0,0 +1,231 @@
+// @(#)root/tmva/tmva/cnn:$Id$
+// Author: Vladimir Ilievski
+
+/**********************************************************************************
+ * Project: TMVA - a Root-integrated toolkit for multivariate data analysis      *
+ * Package: TMVA                                                                  *
+ * Class  :                                                                       *
+ * Web    : http://tmva.sourceforge.net                                           *
+ *                                                                                *
+ * Description:                                                                   *
+ *      Testing the DownsampleAvg method                                          *
+ *                                                                                *
+ * Authors (alphabetical):                                                        *
+ *      Vladimir Ilievski  - CERN, Switzerland                                    *
+ *                                                                                *
+ * Copyright (c) 2005-2015:                                                       *
+ *      CERN, Switzerland                                                         *
+ *      U. of Victoria, Canada                                                    *
+ *      MPI-K Heidelberg, Germany                                                 *
+ *      U. of Bonn, Germany                                                       *
+ *                                                                                *
+ * Redistribution and use in source and binary forms, with or without             *
+ * modification, are permitted according to the terms listed in LICENSE           *
+ * (http://tmva.sourceforge.net/LICENSE)                                          *
+ **********************************************************************************/
+
+////////////////////////////////////////////////////////////////////
+//  Testing the DownsampleAvg function                            //
+////////////////////////////////////////////////////////////////////
+
+#include <iostream>
+#include <cmath>
+#include <cstdlib>
+
+#include "TMVA/DNN/Architectures/Reference.h"
+#include "TestConvNet.h"
+
+using namespace TMVA::DNN;
+using namespace TMVA::DNN::CNN;
+using Matrix_t = typename TReference<double>::Matrix_t;
+
+inline bool isInteger(double x)
+{
+   return x == floor(x);
+}
+
+size_t calculateDimension(size_t imgDim, size_t fltDim, size_t padding, size_t stride)
+{
+   // divide as double, otherwise the integer division always yields an
+   // integer and incompatible hyperparameters would go undetected
+   double dimension = ((double)(imgDim - fltDim + 2 * padding) / stride) + 1;
+   if (!isInteger(dimension)) {
+      std::cout << "Not compatible hyper parameters" << std::endl;
+      std::exit(EXIT_FAILURE);
+   }
+
+   return (size_t)dimension;
+}
+
+/*************************************************************************
+ * Test 1:
+ *  depth = 2, image height = 4, image width = 5,
+ *  frame depth = 2, filter height = 2, filter width = 2,
+ *  stride rows = 2, stride cols = 1,
+ *  zero-padding height = 0, zero-padding width = 0
+ *************************************************************************/
+void test1()
+{
+   double imgTest1[][20] = {
+      {166, 212, 213, 150, 114,
+       119, 109, 115,  88, 144,
+       227, 208, 208, 235,  57,
+        57, 165, 250, 139,  76},
+
+      { 57, 255, 184, 162, 204,
+       220,  11, 192, 183, 174,
+         2, 153, 183, 175,  10,
+        55, 123, 246, 138,  80}};
+
+   double answerTest1[][8] = {
+      {151.5, 162.25, 141.5, 124,
+       164.25, 207.75, 208, 126.75},
+
+      {135.75, 160.5, 180.25, 180.75,
+       83.25, 176.25, 185.5, 100.75}};
+
+   size_t imgDepthTest1 = 2;
+   size_t imgHeightTest1 = 4;
+   size_t imgWidthTest1 = 5;
+   size_t fltHeightTest1 = 2;
+   size_t fltWidthTest1 = 2;
+   size_t strideRowsTest1 = 2;
+   size_t strideColsTest1 = 1;
+
+   Matrix_t A(imgDepthTest1, imgHeightTest1 * imgWidthTest1);
+
+   for (size_t i = 0; i < (size_t)A.GetNrows(); i++) {
+      for (size_t j = 0; j < (size_t)A.GetNcols(); j++) {
+         A(i, j) = imgTest1[i][j];
+      }
+   }
+
+   size_t height = calculateDimension(imgHeightTest1, fltHeightTest1, 0, strideRowsTest1);
+   size_t width = calculateDimension(imgWidthTest1, fltWidthTest1, 0, strideColsTest1);
+
+   Matrix_t B(imgDepthTest1, height * width);
+
+   for (size_t i = 0; i < (size_t)B.GetNrows(); i++) {
+      for (size_t j = 0; j < (size_t)B.GetNcols(); j++) {
+         B(i, j) = answerTest1[i][j];
+      }
+   }
+
+   bool status = testDownsampleAvg<TReference<double>>(A, B, imgHeightTest1, imgWidthTest1, fltHeightTest1,
+                                                       fltWidthTest1, strideRowsTest1, strideColsTest1);
+
+   if (status)
+      std::cout << "Test passed!" << std::endl;
+   else
+      std::cout << "Test not passed!" << std::endl;
+}
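+// Editorial sanity check (not part of the original patch): the first
+// window of Test 2 below is the 2x3 block {200, 79, 69, 49, 230, 21};
+// its sum is 648 and 648 / 6 = 108, matching answerTest2[0][0]. The
+// remaining entries are rounded to three decimals, which is why
+// testDownsampleAvg compares with a tolerance.
+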
+/*************************************************************************
+ * Test 2:
+ *  depth = 1, image height = 6, image width = 6,
+ *  frame depth = 1, filter height = 2, filter width = 3,
+ *  stride rows = 1, stride cols = 3,
+ *  zero-padding height = 0, zero-padding width = 0
+ *************************************************************************/
+void test2()
+{
+   double imgTest2[][36] = {
+      {200,  79,  69,  58,  98, 168,
+        49, 230,  21, 141, 218,  38,
+        72, 224,  14,  65, 147, 105,
+        38,  27, 111, 160, 200,  48,
+       109, 104, 153, 149, 233,  11,
+        16,  91, 236, 183, 166, 155}};
+
+   double answerTest2[][10] = {
+      {108, 120.167,
+       101.667, 119,
+       81, 120.833,
+       90.333, 133.5,
+       118.167, 149.5}};
+
+   size_t imgDepthTest2 = 1;
+   size_t imgHeightTest2 = 6;
+   size_t imgWidthTest2 = 6;
+   size_t fltHeightTest2 = 2;
+   size_t fltWidthTest2 = 3;
+   size_t strideRowsTest2 = 1;
+   size_t strideColsTest2 = 3;
+
+   Matrix_t A(imgDepthTest2, imgHeightTest2 * imgWidthTest2);
+
+   for (size_t i = 0; i < (size_t)A.GetNrows(); i++) {
+      for (size_t j = 0; j < (size_t)A.GetNcols(); j++) {
+         A(i, j) = imgTest2[i][j];
+      }
+   }
+
+   size_t height = calculateDimension(imgHeightTest2, fltHeightTest2, 0, strideRowsTest2);
+   size_t width = calculateDimension(imgWidthTest2, fltWidthTest2, 0, strideColsTest2);
+
+   Matrix_t B(imgDepthTest2, height * width);
+
+   for (size_t i = 0; i < (size_t)B.GetNrows(); i++) {
+      for (size_t j = 0; j < (size_t)B.GetNcols(); j++) {
+         B(i, j) = answerTest2[i][j];
+      }
+   }
+
+   bool status = testDownsampleAvg<TReference<double>>(A, B, imgHeightTest2, imgWidthTest2, fltHeightTest2,
+                                                       fltWidthTest2, strideRowsTest2, strideColsTest2);
+
+   if (status)
+      std::cout << "Test passed!" << std::endl;
+   else
+      std::cout << "Test not passed!" << std::endl;
+}
+
+int main()
+{
+   std::cout << "Testing the DownsampleAvg function:" << std::endl;
+
+   std::cout << "Test 1: " << std::endl;
+   test1();
+
+   std::cout << "Test 2: " << std::endl;
+   test2();
+}
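As a final cross-check independent of TMVA, the expected matrices can be regenerated with a short reference implementation (editorial sketch; zero padding and row-major images assumed, matching both tests):

    #include <cstdio>
    #include <vector>

    // Recompute the averages of all fltH x fltW windows of a row-major image.
    std::vector<double> avgPool(const std::vector<double> &img, int imgH, int imgW,
                                int fltH, int fltW, int strideR, int strideC)
    {
       std::vector<double> out;
       for (int r = 0; r + fltH <= imgH; r += strideR)
          for (int c = 0; c + fltW <= imgW; c += strideC) {
             double sum = 0;
             for (int k = 0; k < fltH; k++)
                for (int l = 0; l < fltW; l++)
                   sum += img[(r + k) * imgW + (c + l)];
             out.push_back(sum / (fltH * fltW));
          }
       return out;
    }

    int main()
    {
       // First slice of imgTest1; expect 151.5 162.25 141.5 124 164.25 207.75 208 126.75
       std::vector<double> img = {166, 212, 213, 150, 114, 119, 109, 115, 88, 144,
                                  227, 208, 208, 235, 57,  57,  165, 250, 139, 76};
       for (double v : avgPool(img, 4, 5, 2, 2, 2, 1)) std::printf("%g ", v);
       std::printf("\n");
    }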