From e05c2e0f1dea20af4f27b97c0798fc261d1b492a Mon Sep 17 00:00:00 2001
From: Joe
Date: Tue, 17 Nov 2020 13:58:42 -0600
Subject: [PATCH 01/11] Fix Reduce Mean error for MobileNets DNN

Fix the indexing error in the Reduce Mean target shape computation.
---
 modules/dnn/src/onnx/onnx_importer.cpp | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index 56683f4c14a6..01d84d9711f2 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -494,14 +494,17 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_)
             MatShape inpShape = outShapes[node_proto.input(0)];
             DictValue axes = layerParams.get("axes");
             bool keepdims = layerParams.get("keepdims");
-            MatShape targetShape = inpShape;
+            MatShape targetShape;
+            std::vector<bool> shouldDelete(inpShape.size(), false);
             for (int i = 0; i < axes.size(); i++) {
                 int axis = clamp(axes.get(i), inpShape.size());
-                if (keepdims) {
-                    targetShape[axis] = 1;
-                } else {
-                    targetShape.erase(targetShape.begin() + axis);
-                }
+                shouldDelete[axis] = true;
+            }
+            for (int axis = 0; axis < inpShape.size(); ++axis){
+                if (!shouldDelete[axis])
+                    targetShape.push_back(inpShape[axis]);
+                else if (keepdims)
+                    targetShape.push_back(1);
             }

             if (inpShape.size() == 3 && axes.size() <= 2)

From bc434e8f67d488dfc334efde894c31dbba003d76 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Fri, 20 Nov 2020 20:32:59 +0000
Subject: [PATCH 02/11] calib3d: eliminate 'register' build warning

---
 modules/calib3d/src/sqpnp.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/calib3d/src/sqpnp.cpp b/modules/calib3d/src/sqpnp.cpp
index 10ea96c42359..7117e61c96c2 100644
--- a/modules/calib3d/src/sqpnp.cpp
+++ b/modules/calib3d/src/sqpnp.cpp
@@ -631,7 +631,7 @@ void PoseSolver::computeRowAndNullspace(const cv::Matx& r,
 void PoseSolver::nearestRotationMatrix(const cv::Matx& e,
     cv::Matx& r)
 {
-    register int i;
+    int i;
     double l, lprev, det_e, e_sq, adj_e_sq, adj_e[9]; // e's adjoint

From ce31c9c448e093600b6daccbc82026c972a818b7 Mon Sep 17 00:00:00 2001
From: Jiri Kucera
Date: Tue, 14 Apr 2020 14:23:43 +0200
Subject: [PATCH 03/11] core(matrix): Negative values checks

Add checks that prevent indexing an array by negative values.
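The diff below applies the same guard to each _InputArray accessor it touches (isContinuous, isSubmatrix, offset, step): assert the lower bound as well as the upper bound before indexing. A minimal sketch of that pattern follows; the stepOf helper is purely illustrative and is not part of the patch or of OpenCV.

    #include <opencv2/core.hpp>
    #include <vector>

    // Illustrative helper (not in OpenCV): reject a negative index explicitly,
    // rather than letting a signed comparison such as (i < height) accept it.
    static size_t stepOf(const std::vector<cv::Mat>& vv, int i)
    {
        CV_Assert(i >= 0 && (size_t)i < vv.size());
        return vv[i].step;
    }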
--- modules/core/src/matrix_wrap.cpp | 32 +++---- modules/core/test/test_mat.cpp | 153 +++++++++++++++++++++++++++++++ 2 files changed, 165 insertions(+), 20 deletions(-) diff --git a/modules/core/src/matrix_wrap.cpp b/modules/core/src/matrix_wrap.cpp index 4c5efd6ba55e..0a8d6c12d747 100644 --- a/modules/core/src/matrix_wrap.cpp +++ b/modules/core/src/matrix_wrap.cpp @@ -947,7 +947,7 @@ bool _InputArray::isContinuous(int i) const if( k == STD_ARRAY_MAT ) { const Mat* vv = (const Mat*)obj; - CV_Assert(i > 0 && i < sz.height); + CV_Assert(i >= 0 && i < sz.height); return vv[i].isContinuous(); } @@ -981,21 +981,21 @@ bool _InputArray::isSubmatrix(int i) const if( k == STD_VECTOR_MAT ) { const std::vector& vv = *(const std::vector*)obj; - CV_Assert((size_t)i < vv.size()); + CV_Assert(i >= 0 && (size_t)i < vv.size()); return vv[i].isSubmatrix(); } if( k == STD_ARRAY_MAT ) { const Mat* vv = (const Mat*)obj; - CV_Assert(i < sz.height); + CV_Assert(i >= 0 && i < sz.height); return vv[i].isSubmatrix(); } if( k == STD_VECTOR_UMAT ) { const std::vector& vv = *(const std::vector*)obj; - CV_Assert((size_t)i < vv.size()); + CV_Assert(i >= 0 && (size_t)i < vv.size()); return vv[i].isSubmatrix(); } @@ -1026,9 +1026,7 @@ size_t _InputArray::offset(int i) const if( k == STD_VECTOR_MAT ) { const std::vector& vv = *(const std::vector*)obj; - if( i < 0 ) - return 1; - CV_Assert( i < (int)vv.size() ); + CV_Assert( i >= 0 && i < (int)vv.size() ); return (size_t)(vv[i].ptr() - vv[i].datastart); } @@ -1036,16 +1034,14 @@ size_t _InputArray::offset(int i) const if( k == STD_ARRAY_MAT ) { const Mat* vv = (const Mat*)obj; - if( i < 0 ) - return 1; - CV_Assert( i < sz.height ); + CV_Assert( i >= 0 && i < sz.height ); return (size_t)(vv[i].ptr() - vv[i].datastart); } if( k == STD_VECTOR_UMAT ) { const std::vector& vv = *(const std::vector*)obj; - CV_Assert((size_t)i < vv.size()); + CV_Assert(i >= 0 && (size_t)i < vv.size()); return vv[i].offset; } @@ -1059,7 +1055,7 @@ size_t _InputArray::offset(int i) const if (k == STD_VECTOR_CUDA_GPU_MAT) { const std::vector& vv = *(const std::vector*)obj; - CV_Assert((size_t)i < vv.size()); + CV_Assert(i >= 0 && (size_t)i < vv.size()); return (size_t)(vv[i].data - vv[i].datastart); } @@ -1089,25 +1085,21 @@ size_t _InputArray::step(int i) const if( k == STD_VECTOR_MAT ) { const std::vector& vv = *(const std::vector*)obj; - if( i < 0 ) - return 1; - CV_Assert( i < (int)vv.size() ); + CV_Assert( i >= 0 && i < (int)vv.size() ); return vv[i].step; } if( k == STD_ARRAY_MAT ) { const Mat* vv = (const Mat*)obj; - if( i < 0 ) - return 1; - CV_Assert( i < sz.height ); + CV_Assert( i >= 0 && i < sz.height ); return vv[i].step; } if( k == STD_VECTOR_UMAT ) { const std::vector& vv = *(const std::vector*)obj; - CV_Assert((size_t)i < vv.size()); + CV_Assert(i >= 0 && (size_t)i < vv.size()); return vv[i].step; } @@ -1119,7 +1111,7 @@ size_t _InputArray::step(int i) const if (k == STD_VECTOR_CUDA_GPU_MAT) { const std::vector& vv = *(const std::vector*)obj; - CV_Assert((size_t)i < vv.size()); + CV_Assert(i >= 0 && (size_t)i < vv.size()); return vv[i].step; } diff --git a/modules/core/test/test_mat.cpp b/modules/core/test/test_mat.cpp index 58eafd074821..90ebd0475544 100644 --- a/modules/core/test/test_mat.cpp +++ b/modules/core/test/test_mat.cpp @@ -9,6 +9,8 @@ #include "opencv2/core/eigen.hpp" #endif +#include "opencv2/core/cuda.hpp" + namespace opencv_test { namespace { class Core_ReduceTest : public cvtest::BaseTest @@ -1984,6 +1986,157 @@ TEST(Core_InputArray, fetch_MatExpr) } +#ifdef CV_CXX11 
+class TestInputArrayRangeChecking { + static const char *kind2str(cv::_InputArray ia) + { + switch (ia.kind()) + { + #define C(x) case cv::_InputArray::x: return #x + C(MAT); + C(UMAT); + C(EXPR); + C(MATX); + C(STD_VECTOR); + C(STD_ARRAY); + C(NONE); + C(STD_VECTOR_VECTOR); + C(STD_BOOL_VECTOR); + C(STD_VECTOR_MAT); + C(STD_ARRAY_MAT); + C(STD_VECTOR_UMAT); + C(CUDA_GPU_MAT); + C(STD_VECTOR_CUDA_GPU_MAT); + #undef C + default: + return ""; + } + } + + static void banner(cv::_InputArray ia, const char *label, const char *name) + { + std::cout << std::endl + << label << " = " << name << ", Kind: " << kind2str(ia) + << std::endl; + } + + template + static void testA(I ia, F f, const char *mfname) + { + banner(ia, "f", mfname); + EXPECT_THROW(f(ia, -1), cv::Exception) + << "f(ia, " << -1 << ") should throw cv::Exception"; + for (int i = 0; i < int(ia.size()); i++) + { + EXPECT_NO_THROW(f(ia, i)) + << "f(ia, " << i << ") should not throw an exception"; + } + EXPECT_THROW(f(ia, int(ia.size())), cv::Exception) + << "f(ia, " << ia.size() << ") should throw cv::Exception"; + } + + template + static void testB(I ia, F f, const char *mfname) + { + banner(ia, "f", mfname); + EXPECT_THROW(f(ia, -1), cv::Exception) + << "f(ia, " << -1 << ") should throw cv::Exception"; + for (int i = 0; i < int(ia.size()); i++) + { + EXPECT_NO_THROW(f(ia, i)) + << "f(ia, " << i << ") should not throw an exception"; + } + EXPECT_THROW(f(ia, int(ia.size())), cv::Exception) + << "f(ia, " << ia.size() << ") should throw cv::Exception"; + } + + static void test_isContinuous() + { + auto f = [](cv::_InputArray ia, int i) { (void)ia.isContinuous(i); }; + + cv::Mat M; + cv::UMat uM; + + std::vector vec = {M, M}; + std::array arr = {M, M}; + std::vector uvec = {uM, uM}; + + testA(vec, f, "isContinuous"); + testA(arr, f, "isContinuous"); + testA(uvec, f, "isContinuous"); + } + + static void test_isSubmatrix() + { + auto f = [](cv::_InputArray ia, int i) { (void)ia.isSubmatrix(i); }; + + cv::Mat M; + cv::UMat uM; + + std::vector vec = {M, M}; + std::array arr = {M, M}; + std::vector uvec = {uM, uM}; + + testA(vec, f, "isSubmatrix"); + testA(arr, f, "isSubmatrix"); + testA(uvec, f, "isSubmatrix"); + } + + static void test_offset() + { + auto f = [](cv::_InputArray ia, int i) { return ia.offset(i); }; + + cv::Mat M; + cv::UMat uM; + cv::cuda::GpuMat gM; + + std::vector vec = {M, M}; + std::array arr = {M, M}; + std::vector uvec = {uM, uM}; + std::vector gvec = {gM, gM}; + + testB(vec, f, "offset"); + testB(arr, f, "offset"); + testB(uvec, f, "offset"); + testB(gvec, f, "offset"); + } + + static void test_step() + { + auto f = [](cv::_InputArray ia, int i) { return ia.step(i); }; + + cv::Mat M; + cv::UMat uM; + cv::cuda::GpuMat gM; + + std::vector vec = {M, M}; + std::array arr = {M, M}; + std::vector uvec = {uM, uM}; + std::vector gvec = {gM, gM}; + + testB(vec, f, "step"); + testB(arr, f, "step"); + testB(uvec, f, "step"); + testB(gvec, f, "step"); + } + +public: + static void run() + { + test_isContinuous(); + test_isSubmatrix(); + test_offset(); + test_step(); + } +}; + +TEST(Core_InputArray, range_checking) +{ + TestInputArrayRangeChecking::run(); +} +#endif + + TEST(Core_Vectors, issue_13078) { float floats_[] = { 1, 2, 3, 4, 5, 6, 7, 8 }; From 632a08ff4012aafc9988eea9f52947b868e8b6e0 Mon Sep 17 00:00:00 2001 From: Hollow Man Date: Sun, 22 Nov 2020 00:00:07 +0800 Subject: [PATCH 04/11] Fix typo in docs adatapted -> adapted --- .../how_to_use_OpenCV_parallel_for_.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown b/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown index eeeb94b4c4b8..9968cdb257a6 100644 --- a/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown +++ b/doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown @@ -32,7 +32,7 @@ automatically available with the platform (e.g. APPLE GCD) but chances are that have access to a parallel framework either directly or by enabling the option in CMake and rebuild the library. The second (weak) precondition is more related to the task you want to achieve as not all computations -are suitable / can be adatapted to be run in a parallel way. To remain simple, tasks that can be split +are suitable / can be adapted to be run in a parallel way. To remain simple, tasks that can be split into multiple elementary operations with no memory dependency (no possible race condition) are easily parallelizable. Computer vision processing are often easily parallelizable as most of the time the processing of one pixel does not depend to the state of other pixels. From 5a3a915a9ba35b31808577245f71d468319d311c Mon Sep 17 00:00:00 2001 From: Or Avital Date: Sun, 22 Nov 2020 14:19:20 +0200 Subject: [PATCH 05/11] Remove unnecessary condition (will never reach) --- modules/core/src/copy.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/core/src/copy.cpp b/modules/core/src/copy.cpp index 7f4329df7823..dcd585d83403 100644 --- a/modules/core/src/copy.cpp +++ b/modules/core/src/copy.cpp @@ -1032,8 +1032,7 @@ void flip( InputArray _src, OutputArray _dst, int flip_mode ) } if ((size.width == 1 && flip_mode > 0) || - (size.height == 1 && flip_mode == 0) || - (size.height == 1 && size.width == 1 && flip_mode < 0)) + (size.height == 1 && flip_mode == 0)) { return _src.copyTo(_dst); } From ac418e999defbfb9852d20a6884118153e7a7151 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sun, 22 Nov 2020 16:28:53 +0000 Subject: [PATCH 06/11] cmake: update condition for find_package(Eigen3 CONFIG) --- cmake/OpenCVFindLibsPerf.cmake | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmake/OpenCVFindLibsPerf.cmake b/cmake/OpenCVFindLibsPerf.cmake index 7b3874ff0e68..3753084d28c8 100644 --- a/cmake/OpenCVFindLibsPerf.cmake +++ b/cmake/OpenCVFindLibsPerf.cmake @@ -51,7 +51,10 @@ endif(WITH_CUDA) # --- Eigen --- if(WITH_EIGEN AND NOT HAVE_EIGEN) - if(NOT OPENCV_SKIP_EIGEN_FIND_PACKAGE_CONFIG) + if((OPENCV_FORCE_EIGEN_FIND_PACKAGE_CONFIG + OR NOT (CMAKE_VERSION VERSION_LESS "3.0.0") # Eigen3Targets.cmake required CMake 3.0.0+ + ) AND NOT OPENCV_SKIP_EIGEN_FIND_PACKAGE_CONFIG + ) find_package(Eigen3 CONFIG QUIET) # Ceres 2.0.0 CMake scripts doesn't work with CMake's FindEigen3.cmake module (due to missing EIGEN3_VERSION_STRING) endif() if(NOT Eigen3_FOUND) From f4f462c50bd3e3c25c8ad7b88e3bfaca49d0f702 Mon Sep 17 00:00:00 2001 From: Sergei Slashchinin <62052793+sl-sergei@users.noreply.github.com> Date: Tue, 24 Nov 2020 19:52:45 +0300 Subject: [PATCH 07/11] Merge pull request #18862 from sl-sergei:support_pool1d Support for Pool1d layer for OpenCV and OpenCL targets * Initial version of Pool1d support * Fix variable naming * Fix 1d pooling for OpenCL * Change support logic, remove unnecessary variable, split the tests * Remove other depricated variables * Fix warning. 
Check tests * Change support check logic * Change support check logic, 2 --- .../dnn/include/opencv2/dnn/all_layers.hpp | 2 - modules/dnn/src/layers/pooling_layer.cpp | 186 +++++++++++------- modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp | 16 +- modules/dnn/test/test_onnx_importer.cpp | 78 ++++++++ 4 files changed, 205 insertions(+), 77 deletions(-) diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp index c9455ab528ab..ffc2568a89c7 100644 --- a/modules/dnn/include/opencv2/dnn/all_layers.hpp +++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp @@ -248,8 +248,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN int type; std::vector kernel_size, strides; std::vector pads_begin, pads_end; - CV_DEPRECATED_EXTERNAL Size kernel, stride, pad; - CV_DEPRECATED_EXTERNAL int pad_l, pad_t, pad_r, pad_b; bool globalPooling; //!< Flag is true if at least one of the axes is global pooled. std::vector isGlobalPooling; bool computeMaxIdx; diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index 98417620ed49..d4eb1f174a4a 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -85,8 +85,6 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer computeMaxIdx = true; globalPooling = false; isGlobalPooling = std::vector(3, false); - stride = Size(1, 1); - pad_t = pad_l = pad_b = pad_r = 0; hasDynamicShapes = params.get("has_dynamic_shapes", false); shapesInitialized = !hasDynamicShapes; @@ -108,16 +106,6 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer getPoolingKernelParams(params, kernel_size, isGlobalPooling, pads_begin, pads_end, strides, padMode); globalPooling = isGlobalPooling[0] || isGlobalPooling[1] || isGlobalPooling[2]; - if (kernel_size.size() == 2) { - kernel = Size(kernel_size[1], kernel_size[0]); - stride = Size(strides[1], strides[0]); - pad = Size(pads_begin[1], pads_begin[0]); - - pad_t = pads_begin[0]; - pad_l = pads_begin[1]; - pad_b = pads_end[0]; - pad_r = pads_end[1]; - } } else if (params.has("pooled_w") || params.has("pooled_h")) { @@ -165,17 +153,20 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer finalKernel.push_back(isGlobalPooling[idx] ? 
inp[i] : kernel_size[idx]); } kernel_size = finalKernel; - kernel = Size(kernel_size[1], kernel_size[0]); } getConvPoolPaddings(inp, kernel_size, strides, padMode, pads_begin, pads_end); - if (pads_begin.size() == 2) { - pad_t = pads_begin[0]; - pad_l = pads_begin[1]; - pad_b = pads_end[0]; - pad_r = pads_end[1]; + + if (inputs[0].dims == 3) + { + //Pool1D + kernel_size.erase(kernel_size.begin() + 1); + strides.erase(strides.begin() + 1); + pads_begin.erase(pads_begin.begin() + 1); + pads_end.erase(pads_end.begin() + 1); } + #ifdef HAVE_OPENCL poolOp.release(); #endif @@ -191,9 +182,11 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer return false; if (kernel_size.size() == 3) return preferableTarget == DNN_TARGET_CPU; + if (kernel_size.size() == 1) + return false; if (preferableTarget == DNN_TARGET_MYRIAD) { #if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1) - if (type == MAX && (pad_l == 1 && pad_t == 1) && stride == Size(2, 2) ) { + if (type == MAX && (pads_begin[1] == 1 && pads_begin[0] == 1) && (strides[0] == 2 && strides[1] == 2)) { return !isMyriadX(); } #endif @@ -205,19 +198,23 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer #endif if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { - return !computeMaxIdx && type != STOCHASTIC; + return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1; } - else if (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE) + else if (backendId == DNN_BACKEND_OPENCV) { if (kernel_size.size() == 3) - return (backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU); - if (kernel_size.empty() || kernel_size.size() == 2) - return backendId == DNN_BACKEND_OPENCV || - (backendId == DNN_BACKEND_HALIDE && haveHalide() && - (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r))); + return preferableTarget == DNN_TARGET_CPU; + if (kernel_size.size() <= 2) + return true; else return false; } + else if (backendId == DNN_BACKEND_HALIDE) + { + if (kernel_size.empty() || kernel_size.size() == 2) + return haveHalide() && + (type == MAX || (type == AVE && !pads_begin[0] && !pads_begin[1] && !pads_end[0] && !pads_end[1])); + } return false; } @@ -237,12 +234,25 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer config.in_shape = shape(inputs[0]); config.out_shape = shape(outputs[0]); - config.kernel = kernel; - config.pad_l = pad_l; - config.pad_t = pad_t; - config.pad_r = pad_r; - config.pad_b = pad_b; - config.stride = stride; + if (inputs[0].dims == 3) + { + //Pool1D + config.kernel = Size(kernel_size[0], 1); + config.stride = Size(strides[0], 1); + config.pad_l = pads_begin[0]; + config.pad_t = 0; + config.pad_r = pads_end[0]; + config.pad_b = 0; + } + else + { + config.kernel = Size(kernel_size[1], kernel_size[0]); + config.stride = Size(strides[1], strides[0]); + config.pad_l = pads_begin[1]; + config.pad_t = pads_begin[0]; + config.pad_r = pads_end[1]; + config.pad_b = pads_end[0]; + } config.channels = inputs[0].size[1]; config.pool_method = type == MAX ? LIBDNN_POOLING_METHOD_MAX : (type == AVE ? 
LIBDNN_POOLING_METHOD_AVE : @@ -428,7 +438,6 @@ virtual Ptr initNgraph(const std::vector >& inp public: const Mat* src, *rois; Mat *dst, *mask; - Size kernel, stride; int pad_l, pad_t, pad_r, pad_b; bool avePoolPaddedArea; int nstripes; @@ -453,7 +462,7 @@ virtual Ptr initNgraph(const std::vector >& inp CV_Assert_N( src.isContinuous(), dst.isContinuous(), src.type() == CV_32F, src.type() == dst.type(), - src.dims == 4 || src.dims == 5, dst.dims == 4 || dst.dims == 5, + src.dims == 3 || src.dims == 4 || src.dims == 5, dst.dims == 3 || dst.dims == 4 || dst.dims == 5, (((poolingType == ROI || poolingType == PSROI) && dst.size[0] == rois.size[0]) || src.size[0] == dst.size[0]), poolingType == PSROI || src.size[1] == dst.size[1], @@ -461,6 +470,9 @@ virtual Ptr initNgraph(const std::vector >& inp PoolingInvoker p; + bool isPool1D = src.dims == 3; + bool isPool3D = src.dims == 5; + p.src = &src; p.rois = &rois; p.dst = &dst; @@ -471,12 +483,10 @@ virtual Ptr initNgraph(const std::vector >& inp p.pads_end = pads_end; p.mask = &mask; - p.kernel = Size(kernel_size[1], kernel_size[0]); - p.stride = Size(strides[1], strides[0]); p.pad_l = pads_begin.back(); - p.pad_t = pads_begin[pads_begin.size() - 2]; + p.pad_t = isPool1D ? 0 : pads_begin[pads_begin.size() - 2]; p.pad_r = pads_end.back(); - p.pad_b = pads_end[pads_end.size() - 2]; + p.pad_b = isPool1D ? 0 : pads_end[pads_end.size() - 2]; p.avePoolPaddedArea = avePoolPaddedArea; p.nstripes = nstripes; @@ -486,11 +496,11 @@ virtual Ptr initNgraph(const std::vector >& inp if( !computeMaxIdx ) { - int height = src.size[src.dims - 2]; + int height = isPool1D ? 1 : src.size[src.dims - 2]; int width = src.size[src.dims - 1]; - int kernel_d = (kernel_size.size() == 3) ? kernel_size[0] : 1; - int kernel_h = kernel_size[kernel_size.size() - 2]; + int kernel_d = isPool3D ? kernel_size[0] : 1; + int kernel_h = isPool1D ? 1 : kernel_size[kernel_size.size() - 2]; int kernel_w = kernel_size.back(); p.ofsbuf.resize(kernel_d * kernel_h * kernel_w); @@ -510,13 +520,15 @@ virtual Ptr initNgraph(const std::vector >& inp { int channels = dst->size[1]; + bool isPool3D = src->dims == 5; bool isPool2D = src->dims == 4; - int depth = !isPool2D? dst->size[2] : 1; - int height = dst->size[dst->dims - 2]; + bool isPool1D = src->dims == 3; + int depth = isPool3D? dst->size[2] : 1; + int height = isPool1D? 1 : dst->size[dst->dims - 2]; int width = dst->size[dst->dims - 1]; - int inp_depth = !isPool2D? src->size[2] : 1; - int inp_height = src->size[src->dims - 2]; + int inp_depth = isPool3D? src->size[2] : 1; + int inp_height = isPool1D? 1 : src->size[src->dims - 2]; int inp_width = src->size[src->dims - 1]; size_t total = dst->total(); @@ -524,12 +536,12 @@ virtual Ptr initNgraph(const std::vector >& inp size_t stripeStart = r.start*stripeSize; size_t stripeEnd = std::min(r.end*stripeSize, total); - int kernel_d = !isPool2D? kernel_size[0] : 1; - int kernel_h = kernel_size[kernel_size.size() - 2]; + int kernel_d = isPool3D? kernel_size[0] : 1; + int kernel_h = isPool1D? 1 : kernel_size[kernel_size.size() - 2]; int kernel_w = kernel_size.back(); - int stride_d = !isPool2D? strides[0] : 0; - int stride_h = strides[strides.size() - 2]; + int stride_d = isPool3D? strides[0] : 0; + int stride_h = isPool1D? 
1 :strides[strides.size() - 2]; int stride_w = strides.back(); bool compMaxIdx = computeMaxIdx; @@ -720,7 +732,24 @@ virtual Ptr initNgraph(const std::vector >& inp } } else +#else + CV_UNUSED(isPool2D); #endif + if( isPool1D ) + { + const float* first = srcData + xstart; + const float* last = srcData + xend; + const float* max_elem = std::max_element(first, last); + if (max_elem!=last) + { + dstData[x0] = *max_elem; + if( compMaxIdx ) + { + dstMaskData[x0] = std::distance(first, max_elem); + } + } + } + else { float max_val = -FLT_MAX; if( compMaxIdx ) @@ -794,6 +823,14 @@ virtual Ptr initNgraph(const std::vector >& inp } else #endif + if( isPool1D ) + { + const float* first = srcData + xstart; + const float* last = srcData + xend; + float sum_val = std::accumulate(first, last, 0.f); + dstData[x0] = sum_val*inv_kernel_area; + } + else { float sum_val = 0.f; for (int d = dstart; d < dend; ++d) { @@ -907,20 +944,26 @@ virtual Ptr initNgraph(const std::vector >& inp Halide::Buffer inputBuffer = halideBuffer(inputs[0]); const int inWidth = inputBuffer.width(); const int inHeight = inputBuffer.height(); + const size_t kernelHeight = kernel_size[0]; + const size_t kernelWidth = kernel_size[1]; + const size_t strideHeight = strides[0]; + const size_t strideWidth = strides[1]; + const size_t paddingTop = pads_begin[0]; + const size_t paddingLeft = pads_begin[1]; Halide::Var x("x"), y("y"), c("c"), n("n"); Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name)); - Halide::RDom r(0, kernel.width, 0, kernel.height); + Halide::RDom r(0, kernelWidth, 0, kernelHeight); Halide::Expr kx, ky; - if(pad_l || pad_t) + if(paddingLeft || paddingTop) { - kx = clamp(x * stride.width + r.x - pad_l, 0, inWidth - 1); - ky = clamp(y * stride.height + r.y - pad_t, 0, inHeight - 1); + kx = clamp(x * strideWidth + r.x - paddingLeft, 0, inWidth - 1); + ky = clamp(y * strideHeight + r.y - paddingTop, 0, inHeight - 1); } else { - kx = min(x * stride.width + r.x, inWidth - 1); - ky = min(y * stride.height + r.y, inHeight - 1); + kx = min(x * strideWidth + r.x, inWidth - 1); + ky = min(y * strideHeight + r.y, inHeight - 1); } // Halide::argmax returns tuple (r.x, r.y, max). @@ -928,17 +971,17 @@ virtual Ptr initNgraph(const std::vector >& inp // Compute offset from argmax in range [0, kernel_size). 
Halide::Expr max_index; - if(pad_l || pad_t) + if(paddingLeft || paddingTop) { - max_index = clamp(y * stride.height + res[1] - pad_t, + max_index = clamp(y * strideHeight + res[1] - paddingTop, 0, inHeight - 1) * inWidth + - clamp(x * stride.width + res[0] - pad_l, + clamp(x * strideWidth + res[0] - paddingLeft, 0, inWidth - 1); } else { - max_index = min(y * stride.height + res[1], inHeight - 1) * inWidth + - min(x * stride.width + res[0], inWidth - 1); + max_index = min(y * strideHeight + res[1], inHeight - 1) * inWidth + + min(x * strideWidth + res[0], inWidth - 1); } top(x, y, c, n) = { res[2], Halide::cast(max_index) }; return Ptr(new HalideBackendNode(top)); @@ -952,21 +995,25 @@ virtual Ptr initNgraph(const std::vector >& inp Halide::Buffer inputBuffer = halideBuffer(inputs[0]); const int inW = inputBuffer.width(), inH = inputBuffer.height(); - if ((inW - kernel.width) % stride.width || (inH - kernel.height) % stride.height) + const size_t kernelHeight = kernel_size[0]; + const size_t kernelWidth = kernel_size[1]; + const size_t strideHeight = strides[0]; + const size_t strideWidth = strides[1]; + if ((inW - kernelWidth) % strideWidth || (inH - kernelHeight) % strideHeight) { CV_Error(cv::Error::StsNotImplemented, "Halide backend for average pooling with partial " "kernels is not implemented"); } - const float norm = 1.0f / (kernel.width * kernel.height); + const float norm = 1.0f / (kernelWidth * kernelHeight); Halide::Var x("x"), y("y"), c("c"), n("n"); Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name)); - Halide::RDom r(0, kernel.width, 0, kernel.height); + Halide::RDom r(0, kernelWidth, 0, kernelHeight); top(x, y, c, n) = sum( - inputBuffer(x * stride.width + r.x, - y * stride.height + r.y, c, n)) * norm; + inputBuffer(x * strideWidth + r.x, + y * strideHeight + r.y, c, n)) * norm; return Ptr(new HalideBackendNode(top)); #endif // HAVE_HALIDE return Ptr(); @@ -1028,6 +1075,7 @@ virtual Ptr initNgraph(const std::vector >& inp { CV_Assert(inputs.size() != 0); + bool isPool1D = inputs[0].size() == 3; std::vector inpShape(inputs[0].begin() + 2, inputs[0].end()); std::vector outShape(inputs[0].begin(), inputs[0].begin() + 2); @@ -1056,14 +1104,15 @@ virtual Ptr initNgraph(const std::vector >& inp } else if (padMode.empty()) { - for (int i = 0; i < local_kernel.size(); i++) { + int addedDims = isPool1D? inpShape.size() : local_kernel.size(); + for (int i = 0; i < addedDims; i++) { float dst = (float) (inpShape[i] + pads_begin[i] + pads_end[i] - local_kernel[i]) / strides[i]; outShape.push_back(1 + (ceilMode ? ceil(dst) : floor(dst))); } // If we have padding, ensure that the last pooling starts strictly // inside the image (instead of at the padding); otherwise clip the last. - for (int i = 0; i < pads_end.size(); i++) { + for (int i = 0; i < addedDims; i++) { if (pads_end[i] && (outShape[2 + i] - 1) * strides[i] >= inpShape[i] + pads_end[i]) { --outShape[2 + i]; CV_Assert((outShape[2 + i] - 1) * strides[i] < inpShape[i] + pads_end[i]); @@ -1107,7 +1156,8 @@ virtual Ptr initNgraph(const std::vector >& inp { CV_UNUSED(inputs); // suppress unused variable warning long flops = 0; - size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(), + bool isPool1D = inputs[0].size() == 3; + size_t karea = std::accumulate(kernel_size.begin(), isPool1D? 
kernel_size.begin() + 1 : kernel_size.end(), 1, std::multiplies()); for(int i = 0; i < outputs.size(); i++) { diff --git a/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp b/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp index 47b40cc6c232..b366c97ac8cf 100644 --- a/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp +++ b/modules/dnn/src/ocl4dnn/src/ocl4dnn_pool.cpp @@ -51,18 +51,20 @@ template OCL4DNNPool::OCL4DNNPool(OCL4DNNPoolConfig config) { int dims = config.in_shape.size(); - int spatial_dims = 2; + int spatial_dims = config.in_shape.size()-2; channels_ = config.channels; pool_method_ = config.pool_method; avePoolPaddedArea = config.avePoolPaddedArea; computeMaxIdx = config.computeMaxIdx; use_half = config.use_half; + kernel_shape_.push_back(config.kernel.height); + kernel_shape_.push_back(config.kernel.width); + stride_.push_back(config.stride.height); + stride_.push_back(config.stride.width); for (int i = 0; i < spatial_dims; ++i) { - kernel_shape_.push_back(i == 0 ? config.kernel.height : config.kernel.width); - stride_.push_back(i == 0 ? config.stride.height : config.stride.width); im_in_shape_.push_back(config.in_shape[dims - spatial_dims + i]); im_out_shape_.push_back(config.out_shape[dims - spatial_dims + i]); } @@ -75,10 +77,10 @@ OCL4DNNPool::OCL4DNNPool(OCL4DNNPoolConfig config) pad_l_ = config.pad_l; pad_r_ = config.pad_r; pad_b_ = config.pad_b; - height_ = im_in_shape_[0]; - width_ = im_in_shape_[1]; - pooled_height_ = im_out_shape_[0]; - pooled_width_ = im_out_shape_[1]; + height_ = spatial_dims == 1? 1 : im_in_shape_[0]; + width_ = im_in_shape_.back(); + pooled_height_ = spatial_dims == 1? 1 : im_out_shape_[0]; + pooled_width_ = im_out_shape_.back(); count_ = 1; for (int i = 0; i < config.out_shape.size(); ++i) diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 897b95ad8e7f..f38ca6700f5a 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -747,6 +747,84 @@ TEST_P(Test_ONNX_layers, DynamicAxes) testONNXModels("maxpooling_sigmoid_dynamic_axes"); } +TEST_P(Test_ONNX_layers, MaxPool1d) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } + testONNXModels("maxpooling_1d"); +} + +TEST_P(Test_ONNX_layers, MaxPoolSigmoid1d) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } + testONNXModels("maxpooling_sigmoid_1d"); +} + +TEST_P(Test_ONNX_layers, MaxPool1d_Twise) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } + testONNXModels("two_maxpooling_1d"); +} + +TEST_P(Test_ONNX_layers, AvePool1d) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + { + if 
(target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } + testONNXModels("average_pooling_1d"); +} + +TEST_P(Test_ONNX_layers, PoolConv1d) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } + testONNXModels("pool_conv_1d"); +} + +TEST_P(Test_ONNX_layers, ConvResizePool1d) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } + testONNXModels("conv_resize_pool_1d"); +} + INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets()); class Test_ONNX_nets : public Test_ONNX_layers From 0800f6f91b0806bded8eb279aff93ba5d40847f2 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 24 Nov 2020 22:26:10 +0000 Subject: [PATCH 08/11] videoio: add missing getCaptureDomain() methods --- modules/videoio/src/cap_openni.cpp | 1 + modules/videoio/src/cap_openni2.cpp | 1 + 2 files changed, 2 insertions(+) diff --git a/modules/videoio/src/cap_openni.cpp b/modules/videoio/src/cap_openni.cpp index e4dbea80d722..1281dc2f9933 100644 --- a/modules/videoio/src/cap_openni.cpp +++ b/modules/videoio/src/cap_openni.cpp @@ -311,6 +311,7 @@ class CvCapture_OpenNI : public CvCapture virtual bool setProperty(int probIdx, double propVal) CV_OVERRIDE; virtual bool grabFrame() CV_OVERRIDE; virtual IplImage* retrieveFrame(int outputType) CV_OVERRIDE; + virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI; } bool isOpened() const; diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp index 926a004196e9..fa5a2fd699f6 100644 --- a/modules/videoio/src/cap_openni2.cpp +++ b/modules/videoio/src/cap_openni2.cpp @@ -119,6 +119,7 @@ class CvCapture_OpenNI2 : public CvCapture virtual bool setProperty(int probIdx, double propVal) CV_OVERRIDE; virtual bool grabFrame() CV_OVERRIDE; virtual IplImage* retrieveFrame(int outputType) CV_OVERRIDE; + virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI2; } bool isOpened() const; From f28895cd6bcade173a9765afb08d353e440c3fbf Mon Sep 17 00:00:00 2001 From: Gabriel Nascarella Hishida Date: Tue, 24 Nov 2020 22:14:55 -0300 Subject: [PATCH 09/11] doc: Fix example code using deprecated xrange xrange was abandoned and doesn't exist in Python 3. 
range() works just the same --- .../py_calib3d/py_calibration/py_calibration.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown index e337999efd32..bba7b90b9ffb 100644 --- a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown +++ b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown @@ -209,7 +209,7 @@ find the average error, we calculate the arithmetical mean of the errors calcula calibration images. @code{.py} mean_error = 0 -for i in xrange(len(objpoints)): +for i in range(len(objpoints)): imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist) error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2) mean_error += error From 387a76ba598fa1e177f8d79a2760ec75a4e9b0e1 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 25 Nov 2020 12:56:12 +0000 Subject: [PATCH 10/11] build: xcode 12 support, cmake fixes --- CMakeLists.txt | 5 +++++ cmake/OpenCVCompilerOptions.cmake | 2 +- cmake/OpenCVFindLibsGrfmt.cmake | 36 ++++++++++++++++--------------- platforms/ios/build_framework.py | 2 +- 4 files changed, 26 insertions(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d143a7aeaa5e..f1b542387178 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,6 +32,11 @@ endif() # # Configure CMake policies # + +if(POLICY CMP0025) + cmake_policy(SET CMP0025 NEW) # CMAKE_CXX_COMPILER_ID=AppleClang +endif() + if(POLICY CMP0026) cmake_policy(SET CMP0026 NEW) endif() diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 9ac671dd3488..21201c12dde8 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -151,7 +151,7 @@ if(CV_GCC OR CV_CLANG) if(CV_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) add_extra_compiler_option(-Wno-missing-field-initializers) # GCC 4.x emits warnings about {}, fixed in GCC 5+ endif() - if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0) + if(CV_CLANG AND NOT CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10.0) add_extra_compiler_option(-Wno-deprecated-enum-enum-conversion) add_extra_compiler_option(-Wno-deprecated-anon-enum-enum-conversion) endif() diff --git a/cmake/OpenCVFindLibsGrfmt.cmake b/cmake/OpenCVFindLibsGrfmt.cmake index fcf716b976fd..4ad44fe833db 100644 --- a/cmake/OpenCVFindLibsGrfmt.cmake +++ b/cmake/OpenCVFindLibsGrfmt.cmake @@ -15,11 +15,12 @@ else() endif() if(NOT ZLIB_FOUND) - ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIRS) + ocv_clear_vars(ZLIB_LIBRARY ZLIB_LIBRARIES ZLIB_INCLUDE_DIR) - set(ZLIB_LIBRARY zlib) + set(ZLIB_LIBRARY zlib CACHE INTERNAL "") add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/zlib") - set(ZLIB_INCLUDE_DIRS "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}") + set(ZLIB_INCLUDE_DIR "${${ZLIB_LIBRARY}_SOURCE_DIR}" "${${ZLIB_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "") + set(ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIR}) set(ZLIB_LIBRARIES ${ZLIB_LIBRARY}) ocv_parse_header2(ZLIB "${${ZLIB_LIBRARY}_SOURCE_DIR}/zlib.h" ZLIB_VERSION) @@ -37,16 +38,17 @@ if(WITH_JPEG) ocv_clear_vars(JPEG_LIBRARY JPEG_LIBRARIES JPEG_INCLUDE_DIR) if(NOT BUILD_JPEG_TURBO_DISABLE) - set(JPEG_LIBRARY libjpeg-turbo) + set(JPEG_LIBRARY libjpeg-turbo CACHE INTERNAL "") set(JPEG_LIBRARIES ${JPEG_LIBRARY}) add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg-turbo") - 
set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src") + set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "") else() - set(JPEG_LIBRARY libjpeg) + set(JPEG_LIBRARY libjpeg CACHE INTERNAL "") set(JPEG_LIBRARIES ${JPEG_LIBRARY}) add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjpeg") - set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}") + set(JPEG_INCLUDE_DIR "${${JPEG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "") endif() + set(JPEG_INCLUDE_DIRS "${JPEG_INCLUDE_DIR}") endif() macro(ocv_detect_jpeg_version header_file) @@ -83,10 +85,10 @@ if(WITH_TIFF) if(NOT TIFF_FOUND) ocv_clear_vars(TIFF_LIBRARY TIFF_LIBRARIES TIFF_INCLUDE_DIR) - set(TIFF_LIBRARY libtiff) + set(TIFF_LIBRARY libtiff CACHE INTERNAL "") set(TIFF_LIBRARIES ${TIFF_LIBRARY}) add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libtiff") - set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}") + set(TIFF_INCLUDE_DIR "${${TIFF_LIBRARY}_SOURCE_DIR}" "${${TIFF_LIBRARY}_BINARY_DIR}" CACHE INTERNAL "") ocv_parse_header("${${TIFF_LIBRARY}_SOURCE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION) endif() @@ -128,12 +130,12 @@ endif() if(WITH_WEBP AND NOT WEBP_FOUND AND (NOT ANDROID OR HAVE_CPUFEATURES) ) - - set(WEBP_LIBRARY libwebp) + ocv_clear_vars(WEBP_LIBRARY WEBP_INCLUDE_DIR) + set(WEBP_LIBRARY libwebp CACHE INTERNAL "") set(WEBP_LIBRARIES ${WEBP_LIBRARY}) add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libwebp") - set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src") + set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}/src" CACHE INTERNAL "") set(HAVE_WEBP 1) endif() @@ -164,10 +166,10 @@ if(WITH_JASPER) if(NOT JASPER_FOUND) ocv_clear_vars(JASPER_LIBRARY JASPER_LIBRARIES JASPER_INCLUDE_DIR) - set(JASPER_LIBRARY libjasper) + set(JASPER_LIBRARY libjasper CACHE INTERNAL "") set(JASPER_LIBRARIES ${JASPER_LIBRARY}) add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libjasper") - set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}") + set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "") endif() set(HAVE_JASPER YES) @@ -197,10 +199,10 @@ if(WITH_PNG) if(NOT PNG_FOUND) ocv_clear_vars(PNG_LIBRARY PNG_LIBRARIES PNG_INCLUDE_DIR PNG_PNG_INCLUDE_DIR HAVE_LIBPNG_PNG_H PNG_DEFINITIONS) - set(PNG_LIBRARY libpng) + set(PNG_LIBRARY libpng CACHE INTERNAL "") set(PNG_LIBRARIES ${PNG_LIBRARY}) add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libpng") - set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}") + set(PNG_INCLUDE_DIR "${${PNG_LIBRARY}_SOURCE_DIR}" CACHE INTERNAL "") set(PNG_DEFINITIONS "") ocv_parse_header("${PNG_INCLUDE_DIR}/png.h" PNG_VERSION_LINES PNG_LIBPNG_VER_MAJOR PNG_LIBPNG_VER_MINOR PNG_LIBPNG_VER_RELEASE) endif() @@ -242,7 +244,7 @@ if(WITH_GDAL) endif() endif() -if (WITH_GDCM) +if(WITH_GDCM) find_package(GDCM QUIET) if(NOT GDCM_FOUND) set(HAVE_GDCM NO) diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py index 223542d9f54c..bc765872a020 100755 --- a/platforms/ios/build_framework.py +++ b/platforms/ios/build_framework.py @@ -31,7 +31,7 @@ import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing from subprocess import check_call, check_output, CalledProcessError -IPHONEOS_DEPLOYMENT_TARGET='8.0' # default, can be changed via command line options or environment variable +IPHONEOS_DEPLOYMENT_TARGET='9.0' # default, can be changed via command line options or environment variable def execute(cmd, cwd = None): print("Executing: %s in %s" % (cmd, cwd), 
          file=sys.stderr)

From 2cf2456f4c39788ba167d04579fb2251f4fd9a07 Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Thu, 26 Nov 2020 21:30:21 +0000
Subject: [PATCH 11/11] dnn(test): skip unstable GatherMultiOutput OCL_FP16 test

---
 modules/dnn/test/test_onnx_importer.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index f38ca6700f5a..9ba10d4b4718 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -718,6 +718,9 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)

 TEST_P(Test_ONNX_layers, GatherMultiOutput)
 {
+    if (cvtest::skipUnstableTests && backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
+        throw SkipTestException("Skip unstable test: https://github.com/opencv/opencv/issues/18937");
+
 #if defined(INF_ENGINE_RELEASE)
     if (target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE);